Diffstat (limited to 'include')
225 files changed, 4807 insertions, 1981 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 3a10ef5914e..6cd5b6403a7 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -210,7 +210,7 @@ struct acpi_device_power_state { struct acpi_device_power { int state; /* Current state */ struct acpi_device_power_flags flags; - struct acpi_device_power_state states[4]; /* Power states (D0-D3) */ + struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ }; /* Performance Management */ diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index a756bc8d866..4543b6f7586 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h @@ -98,8 +98,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table, /* * Spinlock primitives */ + +#ifndef acpi_os_create_lock acpi_status acpi_os_create_lock(acpi_spinlock *out_handle); +#endif void acpi_os_delete_lock(acpi_spinlock handle); diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 5d2a5e9544d..2ce1be9f629 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -159,6 +159,24 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) } while (0) #endif +/* + * When lockdep is enabled, the spin_lock_init() macro stringifies it's + * argument and uses that as a name for the lock in debugging. + * By executing spin_lock_init() in a macro the key changes from "lock" for + * all locks to the name of the argument of acpi_os_create_lock(), which + * prevents lockdep from reporting false positives for ACPICA locks. + */ +#define acpi_os_create_lock(__handle) \ +({ \ + spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ + \ + if (lock) { \ + *(__handle) = lock; \ + spin_lock_init(*(__handle)); \ + } \ + lock ? AE_OK : AE_NO_MEMORY; \ +}) + #endif /* __KERNEL__ */ #endif /* __ACLINUX_H__ */ diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h index 4586fec75dd..0f79054ce7c 100644 --- a/include/asm-generic/delay.h +++ b/include/asm-generic/delay.h @@ -1,9 +1,44 @@ #ifndef __ASM_GENERIC_DELAY_H #define __ASM_GENERIC_DELAY_H +/* Undefined functions to get compile-time errors */ +extern void __bad_udelay(void); +extern void __bad_ndelay(void); + extern void __udelay(unsigned long usecs); +extern void __ndelay(unsigned long nsecs); +extern void __const_udelay(unsigned long xloops); extern void __delay(unsigned long loops); -#define udelay(n) __udelay(n) +/* + * The weird n/20000 thing suppresses a "comparison is always false due to + * limited range of data type" warning with non-const 8-bit arguments. + */ + +/* 0x10c7 is 2**32 / 1000000 (rounded up) */ +#define udelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / 20000 >= 1) \ + __bad_udelay(); \ + else \ + __const_udelay((n) * 0x10c7ul); \ + } else { \ + __udelay(n); \ + } \ + }) + +/* 0x5 is 2**32 / 1000000000 (rounded up) */ +#define ndelay(n) \ + ({ \ + if (__builtin_constant_p(n)) { \ + if ((n) / 20000 >= 1) \ + __bad_ndelay(); \ + else \ + __const_udelay((n) * 5ul); \ + } else { \ + __ndelay(n); \ + } \ + }) #endif /* __ASM_GENERIC_DELAY_H */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index e0ffa3ddb02..912088773a6 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -307,7 +307,11 @@ static inline void *phys_to_virt(unsigned long address) /* * Change "struct page" to physical address. + * + * This implementation is for the no-MMU case only... if you have an MMU + * you'll need to provide your own definitions. 
*/ +#ifndef CONFIG_MMU static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) { return (void __iomem*) (unsigned long)offset; @@ -326,7 +330,9 @@ static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) static inline void iounmap(void *addr) { } +#endif /* CONFIG_MMU */ +#ifdef CONFIG_HAS_IOPORT #ifndef CONFIG_GENERIC_IOMAP static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { @@ -340,9 +346,10 @@ static inline void ioport_unmap(void __iomem *p) extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *p); #endif /* CONFIG_GENERIC_IOMAP */ +#endif /* CONFIG_HAS_IOPORT */ #define xlate_dev_kmem_ptr(p) p -#define xlate_dev_mem_ptr(p) ((void *) (p)) +#define xlate_dev_mem_ptr(p) __va(p) #ifndef virt_to_bus static inline unsigned long virt_to_bus(volatile void *address) diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 76b0cc5637f..98dcd76ce83 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -56,17 +56,29 @@ extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long coun extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); +#ifdef CONFIG_HAS_IOPORT /* Create a virtual mapping cookie for an IO port range */ extern void __iomem *ioport_map(unsigned long port, unsigned int nr); extern void ioport_unmap(void __iomem *); +#endif #ifndef ARCH_HAS_IOREMAP_WC #define ioremap_wc ioremap_nocache #endif +#ifdef CONFIG_PCI /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ struct pci_dev; extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#else +struct pci_dev; +static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } +#endif #endif diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index c5813c87de0..d61c1117021 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -16,6 +16,7 @@ #include <linux/compiler.h> #include <linux/completion.h> #include <linux/if_alg.h> +#include <linux/scatterlist.h> #include <linux/types.h> #include <net/sock.h> diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index e08f344c6cf..3d53efd25ab 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -182,6 +182,7 @@ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ @@ -192,6 +193,7 @@ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h new file mode 100644 index 00000000000..a6863a2dec1 --- /dev/null +++ b/include/linux/amd-iommu.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel <joerg.roedel@amd.com> + * Leo Duran <leo.duran@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_AMD_IOMMU_H +#define _ASM_X86_AMD_IOMMU_H + +#include <linux/irqreturn.h> + +#ifdef CONFIG_AMD_IOMMU + +extern int amd_iommu_detect(void); + +#else + +static inline int amd_iommu_detect(void) { return -ENODEV; } + +#endif + +#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 69a21e0ebd3..8013a45242f 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -8,6 +8,8 @@ #ifndef _LINUX_ANON_INODES_H #define _LINUX_ANON_INODES_H +struct file_operations; + struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags); diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 7d650a0e3d8..7216b0daf54 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h @@ -20,6 +20,7 @@ #include <linux/if_arcnet.h> #ifdef __KERNEL__ +#include <linux/irqreturn.h> #ifndef bool #define bool int diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h index 60a7c49dcb4..6e3f54f3784 100644 --- a/include/linux/ath9k_platform.h +++ b/include/linux/ath9k_platform.h @@ -30,6 +30,8 @@ struct ath9k_platform_data { u32 gpio_val; bool is_clk_25mhz; + int (*get_mac_revision)(void); + int (*external_reset)(void); }; #endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/atomic.h b/include/linux/atomic.h index ee456c79b0e..bc6615d4132 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -34,6 +34,32 @@ static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) } #endif +#ifndef atomic_inc_unless_negative +static inline int atomic_inc_unless_negative(atomic_t *p) +{ + int v, v1; + for (v = 0; v >= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v + 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + +#ifndef atomic_dec_unless_positive +static inline int atomic_dec_unless_positive(atomic_t *p) +{ + int v, v1; + for (v = 0; v <= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v - 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + #ifndef CONFIG_ARCH_HAS_ATOMIC_OR static inline void atomic_or(int i, atomic_t *v) { diff --git a/include/linux/audit.h b/include/linux/audit.h index 9d339eb2788..0c8006129fb 100644 --- 
a/include/linux/audit.h +++ b/include/linux/audit.h @@ -613,6 +613,12 @@ extern void audit_log_d_path(struct audit_buffer *ab, extern void audit_log_key(struct audit_buffer *ab, char *key); extern void audit_log_lost(const char *message); +#ifdef CONFIG_SECURITY +extern void audit_log_secctx(struct audit_buffer *ab, u32 secid); +#else +#define audit_log_secctx(b,s) do { ; } while (0) +#endif + extern int audit_update_lsm_rules(void); /* Private API (for audit.c only) */ @@ -635,6 +641,7 @@ extern int audit_enabled; #define audit_log_untrustedstring(a,s) do { ; } while (0) #define audit_log_d_path(b, p, d) do { ; } while (0) #define audit_log_key(b, k) do { ; } while (0) +#define audit_log_secctx(b,s) do { ; } while (0) #define audit_enabled 0 #endif #endif diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 08763e4e848..8c96654bef1 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -6,6 +6,7 @@ #include <linux/bcma/bcma_driver_chipcommon.h> #include <linux/bcma/bcma_driver_pci.h> +#include <linux/ssb/ssb.h> /* SPROM sharing */ #include "bcma_regs.h" @@ -24,6 +25,11 @@ struct bcma_chipinfo { u8 pkg; }; +enum bcma_clkmode { + BCMA_CLKMODE_FAST, + BCMA_CLKMODE_DYNAMIC, +}; + struct bcma_host_ops { u8 (*read8)(struct bcma_device *core, u16 offset); u16 (*read16)(struct bcma_device *core, u16 offset); @@ -31,6 +37,12 @@ struct bcma_host_ops { void (*write8)(struct bcma_device *core, u16 offset, u8 value); void (*write16)(struct bcma_device *core, u16 offset, u16 value); void (*write32)(struct bcma_device *core, u16 offset, u32 value); +#ifdef CONFIG_BCMA_BLOCKIO + void (*block_read)(struct bcma_device *core, void *buffer, + size_t count, u16 offset, u8 reg_width); + void (*block_write)(struct bcma_device *core, const void *buffer, + size_t count, u16 offset, u8 reg_width); +#endif /* Agent ops */ u32 (*aread32)(struct bcma_device *core, u16 offset); void (*awrite32)(struct bcma_device *core, u16 offset, u32 value); @@ -117,6 +129,8 @@ struct bcma_device { struct bcma_device_id id; struct device dev; + struct device *dma_dev; + unsigned int irq; bool dev_registered; u8 core_index; @@ -179,6 +193,10 @@ struct bcma_bus { struct bcma_drv_cc drv_cc; struct bcma_drv_pci drv_pci; + + /* We decided to share SPROM struct with SSB as long as we do not need + * any hacks for BCMA. This simplifies drivers code. 
*/ + struct ssb_sprom sprom; }; extern inline u32 bcma_read8(struct bcma_device *core, u16 offset) @@ -208,6 +226,18 @@ void bcma_write32(struct bcma_device *core, u16 offset, u32 value) { core->bus->ops->write32(core, offset, value); } +#ifdef CONFIG_BCMA_BLOCKIO +extern inline void bcma_block_read(struct bcma_device *core, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + core->bus->ops->block_read(core, buffer, count, offset, reg_width); +} +extern inline void bcma_block_write(struct bcma_device *core, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + core->bus->ops->block_write(core, buffer, count, offset, reg_width); +} +#endif extern inline u32 bcma_aread32(struct bcma_device *core, u16 offset) { return core->bus->ops->aread32(core, offset); @@ -218,7 +248,24 @@ void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value) core->bus->ops->awrite32(core, offset, value); } +#define bcma_mask32(cc, offset, mask) \ + bcma_write32(cc, offset, bcma_read32(cc, offset) & (mask)) +#define bcma_set32(cc, offset, set) \ + bcma_write32(cc, offset, bcma_read32(cc, offset) | (set)) +#define bcma_maskset32(cc, offset, mask, set) \ + bcma_write32(cc, offset, (bcma_read32(cc, offset) & (mask)) | (set)) + extern bool bcma_core_is_enabled(struct bcma_device *core); +extern void bcma_core_disable(struct bcma_device *core, u32 flags); extern int bcma_core_enable(struct bcma_device *core, u32 flags); +extern void bcma_core_set_clockmode(struct bcma_device *core, + enum bcma_clkmode clkmode); +extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, + bool on); +#define BCMA_DMA_TRANSLATION_MASK 0xC0000000 +#define BCMA_DMA_TRANSLATION_NONE 0x00000000 +#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */ +#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ +extern u32 bcma_core_dma_translation(struct bcma_device *core); #endif /* LINUX_BCMA_H_ */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 083c3b6cd5c..a0f684615ae 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -179,15 +179,7 @@ #define BCMA_CC_PROG_WAITCNT 0x0124 #define BCMA_CC_FLASH_CFG 0x0128 #define BCMA_CC_FLASH_WAITCNT 0x012C -#define BCMA_CC_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */ -#define BCMA_CC_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ -#define BCMA_CC_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ -#define BCMA_CC_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ -#define BCMA_CC_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ -#define BCMA_CC_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ -#define BCMA_CC_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ -#define BCMA_CC_CLKCTLST_HAVEHT 0x00010000 /* HT available */ -#define BCMA_CC_CLKCTLST_HAVEALP 0x00020000 /* APL available */ +/* 0x1E0 is defined as shared BCMA_CLKCTLST */ #define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ #define BCMA_CC_UART0_DATA 0x0300 #define BCMA_CC_UART0_IMR 0x0304 @@ -244,6 +236,8 @@ #define BCMA_CC_REGCTL_DATA 0x065C #define BCMA_CC_PLLCTL_ADDR 0x0660 #define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ +#define BCMA_CC_SPROM_PCIE6 0x0830 /* SPROM beginning on PCIe rev >= 6 */ /* Data for the PMU, if available. 
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index b7e191cf00e..3871b668caf 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -85,5 +85,7 @@ struct bcma_drv_pci { #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) extern void bcma_core_pci_init(struct bcma_drv_pci *pc); +extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, + struct bcma_device *core, bool enable); #endif /* LINUX_BCMA_DRIVER_PCI_H_ */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index f82d88a960c..9faae2ae02e 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -1,13 +1,38 @@ #ifndef LINUX_BCMA_REGS_H_ #define LINUX_BCMA_REGS_H_ +/* Some single registers are shared between many cores */ +/* BCMA_CLKCTLST: ChipCommon (rev >= 20), PCIe, 80211 */ +#define BCMA_CLKCTLST 0x01E0 /* Clock control and status */ +#define BCMA_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define BCMA_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define BCMA_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ +#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */ +#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */ +#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */ +#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */ +#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */ +#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */ +/* Is there any BCM4328 on BCMA bus? 
*/ +#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */ +#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ + /* Agent registers (common for every core) */ -#define BCMA_IOCTL 0x0408 +#define BCMA_IOCTL 0x0408 /* IO control */ #define BCMA_IOCTL_CLK 0x0001 #define BCMA_IOCTL_FGC 0x0002 #define BCMA_IOCTL_CORE_BITS 0x3FFC #define BCMA_IOCTL_PME_EN 0x4000 #define BCMA_IOCTL_BIST_EN 0x8000 +#define BCMA_IOST 0x0500 /* IO status */ +#define BCMA_IOST_CORE_BITS 0x0FFF +#define BCMA_IOST_DMA64 0x1000 +#define BCMA_IOST_GATED_CLK 0x2000 +#define BCMA_IOST_BIST_ERROR 0x4000 +#define BCMA_IOST_BIST_DONE 0x8000 #define BCMA_RESET_CTL 0x0800 #define BCMA_RESET_CTL_RESET 0x0001 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8845613fd7e..fd88a3945aa 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -111,6 +111,7 @@ extern int __must_check remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *, struct pt_regs *); extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); +extern void would_dump(struct linux_binprm *, struct file *); extern int suid_dumpable; #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h index b4326bfa684..564d997e216 100644 --- a/include/linux/bit_spinlock.h +++ b/include/linux/bit_spinlock.h @@ -88,7 +88,7 @@ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr) { #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) return test_bit(bitnum, addr); -#elif defined CONFIG_PREEMPT +#elif defined CONFIG_PREEMPT_COUNT return preempt_count(); #else return 1; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1a23722e887..0e67c45b3bc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -73,7 +73,7 @@ enum rq_cmd_type_bits { /* * try to put the fields that are referenced together in the same cacheline. - * if you modify this structure, be sure to check block/blk-core.c:rq_init() + * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() * as well! */ struct request { @@ -260,8 +260,7 @@ struct queue_limits { unsigned char discard_zeroes_data; }; -struct request_queue -{ +struct request_queue { /* * Together with queue_head for cacheline sharing */ @@ -304,14 +303,14 @@ struct request_queue void *queuedata; /* - * queue needs bounce pages for pages above this limit + * various queue flags, see QUEUE_* below */ - gfp_t bounce_gfp; + unsigned long queue_flags; /* - * various queue flags, see QUEUE_* below + * queue needs bounce pages for pages above this limit */ - unsigned long queue_flags; + gfp_t bounce_gfp; /* * protects queue structures from reentrancy. 
->__queue_lock should @@ -334,8 +333,8 @@ struct request_queue unsigned int nr_congestion_off; unsigned int nr_batching; - void *dma_drain_buffer; unsigned int dma_drain_size; + void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; @@ -393,7 +392,7 @@ struct request_queue #define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */ #define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ #define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ -#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */ +#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */ #define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */ #define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */ #define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */ @@ -403,6 +402,7 @@ struct request_queue #define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */ #define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ +#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -857,12 +857,21 @@ struct request_queue *blk_alloc_queue(gfp_t); struct request_queue *blk_alloc_queue_node(gfp_t, int); extern void blk_put_queue(struct request_queue *); +/* + * Note: Code in between changing the blk_plug list/cb_list or element of such + * lists is preemptable, but such code can't do sleep (or be very careful), + * otherwise data is corrupted. For details, please check schedule() where + * blk_schedule_flush_plug() is called. + */ struct blk_plug { unsigned long magic; struct list_head list; struct list_head cb_list; unsigned int should_sort; + unsigned int count; }; +#define BLK_MAX_REQUEST_COUNT 16 + struct blk_plug_cb { struct list_head list; void (*callback)(struct blk_plug_cb *); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 18a1baf31f2..139c4db55f1 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -22,6 +22,10 @@ typedef u64 cycle_t; struct clocksource; +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA +#include <asm/clocksource.h> +#endif + /** * struct cyclecounter - hardware abstraction for a free running counter * Provides completely state-free accessors to the underlying hardware. 
@@ -153,7 +157,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @shift: cycle to nanosecond divisor (power of two) * @max_idle_ns: max idle time permitted by the clocksource (nsecs) * @flags: flags describing special properties - * @vread: vsyscall based read + * @archdata: arch-specific data * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary */ @@ -169,16 +173,13 @@ struct clocksource { u32 shift; u64 max_idle_ns; -#ifdef CONFIG_IA64 - void *fsys_mmio; /* used by fsyscall asm code */ -#define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr)) -#else -#define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) +#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA + struct arch_clocksource_data archdata; #endif + const char *name; struct list_head list; int rating; - cycle_t (*vread)(void); int (*enable)(struct clocksource *cs); void (*disable)(struct clocksource *cs); unsigned long flags; diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index 47dac5ea8d3..12c517b51ca 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -53,6 +53,7 @@ struct proc_event { PROC_EVENT_UID = 0x00000004, PROC_EVENT_GID = 0x00000040, PROC_EVENT_SID = 0x00000080, + PROC_EVENT_PTRACE = 0x00000100, /* "next" should be 0x00000400 */ /* "last" is the last process event: exit */ PROC_EVENT_EXIT = 0x80000000 @@ -95,6 +96,13 @@ struct proc_event { __kernel_pid_t process_tgid; } sid; + struct ptrace_proc_event { + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; + __kernel_pid_t tracer_pid; + __kernel_pid_t tracer_tgid; + } ptrace; + struct exit_proc_event { __kernel_pid_t process_pid; __kernel_pid_t process_tgid; @@ -109,6 +117,7 @@ void proc_fork_connector(struct task_struct *task); void proc_exec_connector(struct task_struct *task); void proc_id_connector(struct task_struct *task, int which_id); void proc_sid_connector(struct task_struct *task); +void proc_ptrace_connector(struct task_struct *task, int which_id); void proc_exit_connector(struct task_struct *task); #else static inline void proc_fork_connector(struct task_struct *task) @@ -124,6 +133,10 @@ static inline void proc_id_connector(struct task_struct *task, static inline void proc_sid_connector(struct task_struct *task) {} +static inline void proc_ptrace_connector(struct task_struct *task, + int ptrace_id) +{} + static inline void proc_exit_connector(struct task_struct *task) {} #endif /* CONFIG_PROC_EVENTS */ diff --git a/include/linux/cordic.h b/include/linux/cordic.h new file mode 100644 index 00000000000..f932093e20c --- /dev/null +++ b/include/linux/cordic.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +#ifndef __CORDIC_H_ +#define __CORDIC_H_ + +#include <linux/types.h> + +/** + * struct cordic_iq - i/q coordinate. + * + * @i: real part of coordinate (in phase). + * @q: imaginary part of coordinate (quadrature). + */ +struct cordic_iq { + s32 i; + s32 q; +}; + +/** + * cordic_calc_iq() - calculates the i/q coordinate for given angle. + * + * @theta: angle in degrees for which i/q coordinate is to be calculated. + * @coord: function output parameter holding the i/q coordinate. + * + * The function calculates the i/q coordinate for a given angle using + * cordic algorithm. The coordinate consists of a real (i) and an + * imaginary (q) part. The real part is essentially the cosine of the + * angle and the imaginary part is the sine of the angle. The returned + * values are scaled by 2^16 for precision. The range for theta is + * for -180 degrees to +180 degrees. Passed values outside this range are + * converted before doing the actual calculation. + */ +struct cordic_iq cordic_calc_iq(s32 theta); + +#endif /* __CORDIC_H_ */ diff --git a/include/linux/crc8.h b/include/linux/crc8.h new file mode 100644 index 00000000000..13c8dabb044 --- /dev/null +++ b/include/linux/crc8.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2011 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#ifndef __CRC8_H_ +#define __CRC8_H_ + +#include <linux/types.h> + +/* see usage of this value in crc8() description */ +#define CRC8_INIT_VALUE 0xFF + +/* + * Return value of crc8() indicating valid message+crc. This is true + * if a CRC is inverted before transmission. The CRC computed over the + * whole received bitstream is _table[x], where x is the bit pattern + * of the modification (almost always 0xff). + */ +#define CRC8_GOOD_VALUE(_table) (_table[0xFF]) + +/* required table size for crc8 algorithm */ +#define CRC8_TABLE_SIZE 256 + +/* helper macro assuring right table size is used */ +#define DECLARE_CRC8_TABLE(_table) \ + static u8 _table[CRC8_TABLE_SIZE] + +/** + * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. + * + * @table: table to be filled. + * @polynomial: polynomial for which table is to be filled. + * + * This function fills the provided table according the polynomial provided for + * regular bit order (lsb first). Polynomials in CRC algorithms are typically + * represented as shown below. + * + * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 + * + * For lsb first direction x^7 maps to the lsb. So the polynomial is as below. + * + * - lsb first: poly = 10101011(1) = 0xAB + */ +void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); + +/** + * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. + * + * @table: table to be filled. + * @polynomial: polynomial for which table is to be filled. 
+ * + * This function fills the provided table according the polynomial provided for + * reverse bit order (msb first). Polynomials in CRC algorithms are typically + * represented as shown below. + * + * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1 + * + * For msb first direction x^7 maps to the msb. So the polynomial is as below. + * + * - msb first: poly = (1)11010101 = 0xD5 + */ +void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial); + +/** + * crc8() - calculate a crc8 over the given input data. + * + * @table: crc table used for calculation. + * @pdata: pointer to data buffer. + * @nbytes: number of bytes in data buffer. + * @crc: previous returned crc8 value. + * + * The CRC8 is calculated using the polynomial given in crc8_populate_msb() + * or crc8_populate_lsb(). + * + * The caller provides the initial value (either %CRC8_INIT_VALUE + * or the previous returned value) to allow for processing of + * discontiguous blocks of data. When generating the CRC the + * caller is responsible for complementing the final return value + * and inserting it into the byte stream. When validating a byte + * stream (including CRC8), a final return value of %CRC8_GOOD_VALUE + * indicates the byte stream data can be considered valid. + * + * Reference: + * "A Painless Guide to CRC Error Detection Algorithms", ver 3, Aug 1993 + * Williams, Ross N., ross<at>ross.net + * (see URL http://www.ross.net/crc/download/crc_v3.txt). + */ +u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc); + +#endif /* __CRC8_H_ */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 19d90a55541..3f22d8d6d8a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -216,6 +216,7 @@ struct dentry_operations { #define DCACHE_MOUNTED 0x10000 /* is a mountpoint */ #define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */ #define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */ +#define DCACHE_NEED_LOOKUP 0x80000 /* dentry requires i_op->lookup */ #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) @@ -416,7 +417,12 @@ static inline bool d_mountpoint(struct dentry *dentry) return dentry->d_flags & DCACHE_MOUNTED; } -extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); +static inline bool d_need_lookup(struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_NEED_LOOKUP; +} + +extern void d_clear_need_lookup(struct dentry *dentry); extern int sysctl_vfs_cache_pressure; diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index c52280047e2..65a2562f66b 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h @@ -203,6 +203,7 @@ struct dcbmsg { * @DCB_CMD_GFEATCFG: get DCBX features flags * @DCB_CMD_SFEATCFG: set DCBX features negotiation flags * @DCB_CMD_CEE_GET: get CEE aggregated configuration + * @DCB_CMD_IEEE_DEL: delete IEEE 802.1Qaz configuration */ enum dcbnl_commands { DCB_CMD_UNDEFINED, @@ -246,6 +247,7 @@ enum dcbnl_commands { DCB_CMD_SFEATCFG, DCB_CMD_CEE_GET, + DCB_CMD_IEEE_DEL, __DCB_CMD_ENUM_MAX, DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1, @@ -331,18 +333,30 @@ enum ieee_attrs_app { #define DCB_ATTR_IEEE_APP_MAX (__DCB_ATTR_IEEE_APP_MAX - 1) /** - * enum cee_attrs - CEE DCBX get attributes + * enum cee_attrs - CEE DCBX get attributes. 
* * @DCB_ATTR_CEE_UNSPEC: unspecified * @DCB_ATTR_CEE_PEER_PG: peer PG configuration - get only * @DCB_ATTR_CEE_PEER_PFC: peer PFC configuration - get only - * @DCB_ATTR_CEE_PEER_APP: peer APP tlv - get only + * @DCB_ATTR_CEE_PEER_APP_TABLE: peer APP tlv - get only + * @DCB_ATTR_CEE_TX_PG: TX PG configuration (DCB_CMD_PGTX_GCFG) + * @DCB_ATTR_CEE_RX_PG: RX PG configuration (DCB_CMD_PGRX_GCFG) + * @DCB_ATTR_CEE_PFC: PFC configuration (DCB_CMD_PFC_GCFG) + * @DCB_ATTR_CEE_APP_TABLE: APP configuration (multi DCB_CMD_GAPP) + * @DCB_ATTR_CEE_FEAT: DCBX features flags (DCB_CMD_GFEATCFG) + * + * An aggregated collection of the cee std negotiated parameters. */ enum cee_attrs { DCB_ATTR_CEE_UNSPEC, DCB_ATTR_CEE_PEER_PG, DCB_ATTR_CEE_PEER_PFC, DCB_ATTR_CEE_PEER_APP_TABLE, + DCB_ATTR_CEE_TX_PG, + DCB_ATTR_CEE_RX_PG, + DCB_ATTR_CEE_PFC, + DCB_ATTR_CEE_APP_TABLE, + DCB_ATTR_CEE_FEAT, __DCB_ATTR_CEE_MAX }; #define DCB_ATTR_CEE_MAX (__DCB_ATTR_CEE_MAX - 1) @@ -355,6 +369,13 @@ enum peer_app_attr { }; #define DCB_ATTR_CEE_PEER_APP_MAX (__DCB_ATTR_CEE_PEER_APP_MAX - 1) +enum cee_attrs_app { + DCB_ATTR_CEE_APP_UNSPEC, + DCB_ATTR_CEE_APP, + __DCB_ATTR_CEE_APP_MAX +}; +#define DCB_ATTR_CEE_APP_MAX (__DCB_ATTR_CEE_APP_MAX - 1) + /** * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs * diff --git a/include/linux/dccp.h b/include/linux/dccp.h index d638e85dc50..710c04302a1 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -236,6 +236,7 @@ enum dccp_packet_dequeueing_policy { #ifdef __KERNEL__ #include <linux/in.h> +#include <linux/interrupt.h> #include <linux/ktime.h> #include <linux/list.h> #include <linux/uio.h> diff --git a/include/linux/device.h b/include/linux/device.h index e4f62d8896b..160d4ddb249 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -516,7 +516,7 @@ struct device_dma_parameters { * minimizes board-specific #ifdefs in drivers. * @power: For device power management. * See Documentation/power/devices.txt for details. - * @pwr_domain: Provide callbacks that are executed during system suspend, + * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. * @numa_node: NUMA node this device is close to. @@ -567,7 +567,7 @@ struct device { void *platform_data; /* Platform specific data, device core doesn't touch it */ struct dev_pm_info power; - struct dev_power_domain *pwr_domain; + struct dev_pm_domain *pm_domain; #ifdef CONFIG_NUMA int numa_node; /* NUMA node this device is close to */ diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h new file mode 100644 index 00000000000..95b6a82f595 --- /dev/null +++ b/include/linux/dma-direction.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_DMA_DIRECTION_H +#define _LINUX_DMA_DIRECTION_H +/* + * These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts. 
+ */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; +#endif diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ba8319ae5fc..1a167c48d84 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -4,17 +4,9 @@ #include <linux/device.h> #include <linux/err.h> #include <linux/dma-attrs.h> +#include <linux/dma-direction.h> #include <linux/scatterlist.h> -/* These definitions mirror those in pci.h, so they can be used - * interchangeably with their PCI_ counterparts */ -enum dma_data_direction { - DMA_BIDIRECTIONAL = 0, - DMA_TO_DEVICE = 1, - DMA_FROM_DEVICE = 2, - DMA_NONE = 3, -}; - struct dma_map_ops { void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index eee7addec28..8fbf40e0713 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -23,7 +23,9 @@ #include <linux/device.h> #include <linux/uio.h> -#include <linux/dma-mapping.h> +#include <linux/dma-direction.h> + +struct scatterlist; /** * typedef dma_cookie_t - an opaque DMA cookie diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h new file mode 100644 index 00000000000..49638ea3b77 --- /dev/null +++ b/include/linux/dw_apb_timer.h @@ -0,0 +1,56 @@ +/* + * (C) Copyright 2009 Intel Corporation + * Author: Jacob Pan (jacob.jun.pan@intel.com) + * + * Shared with ARM platforms, Jamie Iles, Picochip 2011 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support for the Synopsys DesignWare APB Timers. 
+ */ +#ifndef __DW_APB_TIMER_H__ +#define __DW_APB_TIMER_H__ + +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/interrupt.h> + +#define APBTMRS_REG_SIZE 0x14 + +struct dw_apb_timer { + void __iomem *base; + unsigned long freq; + int irq; +}; + +struct dw_apb_clock_event_device { + struct clock_event_device ced; + struct dw_apb_timer timer; + struct irqaction irqaction; + void (*eoi)(struct dw_apb_timer *); +}; + +struct dw_apb_clocksource { + struct dw_apb_timer timer; + struct clocksource cs; +}; + +void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced); +void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced); + +struct dw_apb_clock_event_device * +dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, + void __iomem *base, int irq, unsigned long freq); +struct dw_apb_clocksource * +dw_apb_clocksource_init(unsigned rating, char *name, void __iomem *base, + unsigned long freq); +void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); +void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); +cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); +void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs); + +#endif /* __DW_APB_TIMER_H__ */ diff --git a/include/linux/efi.h b/include/linux/efi.h index e376270cd26..ec257269392 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -101,6 +101,13 @@ typedef struct { u64 attribute; } efi_memory_desc_t; +typedef struct { + efi_guid_t guid; + u32 headersize; + u32 flags; + u32 imagesize; +} efi_capsule_header_t; + typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); /* @@ -156,6 +163,9 @@ typedef struct { unsigned long set_variable; unsigned long get_next_high_mono_count; unsigned long reset_system; + unsigned long update_capsule; + unsigned long query_capsule_caps; + unsigned long query_variable_info; } efi_runtime_services_t; typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc); @@ -168,7 +178,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor); typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, - unsigned long attr, unsigned long data_size, + u32 attr, unsigned long data_size, void *data); typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); typedef void efi_reset_system_t (int reset_type, efi_status_t status, @@ -177,6 +187,17 @@ typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_siz unsigned long descriptor_size, u32 descriptor_version, efi_memory_desc_t *virtual_map); +typedef efi_status_t efi_query_variable_info_t(u32 attr, + u64 *storage_space, + u64 *remaining_space, + u64 *max_variable_size); +typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **capsules, + unsigned long count, + unsigned long sg_list); +typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, + unsigned long count, + u64 *max_size, + int *reset_type); /* * EFI Configuration Table and GUID definitions @@ -218,6 +239,13 @@ typedef struct { #define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) +#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30)) +#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20)) 
+#define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10)) +#define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00)) +#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10)) +#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02)) + typedef struct { efi_table_hdr_t hdr; unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ @@ -250,6 +278,7 @@ struct efi_memory_map { */ extern struct efi { efi_system_table_t *systab; /* EFI system table */ + unsigned int runtime_version; /* Runtime services version */ unsigned long mps; /* MPS table */ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ unsigned long acpi20; /* ACPI table (ACPI 2.0) */ @@ -266,6 +295,9 @@ extern struct efi { efi_get_variable_t *get_variable; efi_get_next_variable_t *get_next_variable; efi_set_variable_t *set_variable; + efi_query_variable_info_t *query_variable_info; + efi_update_capsule_t *update_capsule; + efi_query_capsule_caps_t *query_capsule_caps; efi_get_next_high_mono_count_t *get_next_high_mono_count; efi_reset_system_t *reset_system; efi_set_virtual_address_map_t *set_virtual_address_map; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 21a8ebf2dc3..d800d514218 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -146,7 +146,7 @@ extern struct request *elv_rb_latter_request(struct request_queue *, struct requ /* * rb support functions. */ -extern struct request *elv_rb_add(struct rb_root *, struct request *); +extern void elv_rb_add(struct rb_root *, struct request *); extern void elv_rb_del(struct rb_root *, struct request *); extern struct request *elv_rb_find(struct rb_root *, sector_t); diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index ab68f785fd1..05955cf0993 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -38,7 +38,7 @@ extern int eth_header(struct sk_buff *skb, struct net_device *dev, const void *daddr, const void *saddr, unsigned len); extern int eth_rebuild_header(struct sk_buff *skb); extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); -extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh); +extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); extern void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 439b173c588..c6e427ab65f 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -287,7 +287,7 @@ enum ethtool_stringset { ETH_SS_TEST = 0, ETH_SS_STATS, ETH_SS_PRIV_FLAGS, - ETH_SS_NTUPLE_FILTERS, + ETH_SS_NTUPLE_FILTERS, /* Do not use, GRXNTUPLE is now deprecated */ ETH_SS_FEATURES, }; @@ -310,9 +310,21 @@ struct ethtool_sset_info { __u32's, etc. */ }; +/** + * enum ethtool_test_flags - flags definition of ethtool_test + * @ETH_TEST_FL_OFFLINE: if set perform online and offline tests, otherwise + * only online tests. + * @ETH_TEST_FL_FAILED: Driver set this flag if test fails. + * @ETH_TEST_FL_EXTERNAL_LB: Application request to perform external loopback + * test. 
+ * @ETH_TEST_FL_EXTERNAL_LB_DONE: Driver performed the external loopback test + */ + enum ethtool_test_flags { - ETH_TEST_FL_OFFLINE = (1 << 0), /* online / offline */ - ETH_TEST_FL_FAILED = (1 << 1), /* test passed / failed */ + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), + ETH_TEST_FL_EXTERNAL_LB = (1 << 2), + ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3), }; /* for requesting NIC test and getting results*/ @@ -714,18 +726,6 @@ enum ethtool_sfeatures_retval_bits { /* needed by dev_disable_lro() */ extern int __ethtool_set_flags(struct net_device *dev, u32 flags); -struct ethtool_rx_ntuple_flow_spec_container { - struct ethtool_rx_ntuple_flow_spec fs; - struct list_head list; -}; - -struct ethtool_rx_ntuple_list { -#define ETHTOOL_MAX_NTUPLE_LIST_ENTRY 1024 -#define ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY 14 - struct list_head list; - unsigned int count; -}; - /** * enum ethtool_phys_id_state - indicator state for physical identification * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated @@ -758,7 +758,6 @@ u32 ethtool_op_get_ufo(struct net_device *dev); int ethtool_op_set_ufo(struct net_device *dev, u32 data); u32 ethtool_op_get_flags(struct net_device *dev); int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported); -void ethtool_ntuple_flush(struct net_device *dev); bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); /** @@ -865,7 +864,6 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported); * error code or zero. * @set_rx_ntuple: Set an RX n-tuple rule. Returns a negative error code * or zero. - * @get_rx_ntuple: Deprecated. * @get_rxfh_indir: Get the contents of the RX flow hash indirection table. * Returns a negative error code or zero. * @set_rxfh_indir: Set the contents of the RX flow hash indirection table. 
@@ -944,7 +942,6 @@ struct ethtool_ops { int (*reset)(struct net_device *, u32 *); int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *); - int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *); int (*get_rxfh_indir)(struct net_device *, struct ethtool_rxfh_indir *); int (*set_rxfh_indir)(struct net_device *, @@ -1017,7 +1014,7 @@ struct ethtool_ops { #define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ #define ETHTOOL_RESET 0x00000034 /* Reset hardware */ #define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ -#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */ +#define ETHTOOL_GRXNTUPLE 0x00000036 /* deprecated */ #define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ #define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ #define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 5e06acf95d0..0c473fd79ac 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -877,7 +877,7 @@ extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, extern void ext3_htree_free_dir_info(struct dir_private_info *p); /* fsync.c */ -extern int ext3_sync_file(struct file *, int); +extern int ext3_sync_file(struct file *, loff_t, loff_t, int); /* hash.c */ extern int ext3fs_dirhash(const char *name, int len, struct diff --git a/include/linux/fb.h b/include/linux/fb.h index 6a827487717..1d6836c498d 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -1043,7 +1043,8 @@ extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file); extern void fb_deferred_io_cleanup(struct fb_info *info); -extern int fb_deferred_io_fsync(struct file *file, int datasync); +extern int fb_deferred_io_fsync(struct file *file, loff_t start, + loff_t end, int datasync); static inline bool fb_be_math(struct fb_info *info) { diff --git a/include/linux/fd.h b/include/linux/fd.h index f5d194af07a..72202b1b9a6 100644 --- a/include/linux/fd.h +++ b/include/linux/fd.h @@ -377,4 +377,26 @@ struct floppy_raw_cmd { #define FDEJECT _IO(2, 0x5a) /* eject the disk */ + +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT +#include <linux/compat.h> + +struct compat_floppy_struct { + compat_uint_t size; + compat_uint_t sect; + compat_uint_t head; + compat_uint_t track; + compat_uint_t stretch; + unsigned char gap; + unsigned char rate; + unsigned char spec1; + unsigned char fmt_gap; + const compat_caddr_t name; +}; + +#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct) +#endif +#endif + #endif diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 4ff09889c5c..357dbfc2829 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -30,10 +30,13 @@ #include <linux/types.h> #include <linux/firewire-constants.h> +/* available since kernel version 2.6.22 */ #define FW_CDEV_EVENT_BUS_RESET 0x00 #define FW_CDEV_EVENT_RESPONSE 0x01 #define FW_CDEV_EVENT_REQUEST 0x02 #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 + +/* available since kernel version 2.6.30 */ #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 @@ -120,24 +123,11 @@ struct fw_cdev_event_response { /** * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 - * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST - * @tcode: See 
&fw_cdev_event_request2 - * @offset: See &fw_cdev_event_request2 - * @handle: See &fw_cdev_event_request2 - * @length: See &fw_cdev_event_request2 - * @data: See &fw_cdev_event_request2 * * This event is sent instead of &fw_cdev_event_request2 if the kernel or - * the client implements ABI version <= 3. - * - * Unlike &fw_cdev_event_request2, the sender identity cannot be established, - * broadcast write requests cannot be distinguished from unicast writes, and - * @tcode of lock requests is %TCODE_LOCK_REQUEST. - * - * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as - * with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send - * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl. + * the client implements ABI version <= 3. &fw_cdev_event_request lacks + * essential information; use &fw_cdev_event_request2 instead. */ struct fw_cdev_event_request { __u64 closure; @@ -452,29 +442,31 @@ union fw_cdev_event { * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and * %FW_CDEV_IOC_SET_ISO_CHANNELS */ -#define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */ /** * struct fw_cdev_get_info - General purpose information ioctl * @version: The version field is just a running serial number. Both an * input parameter (ABI version implemented by the client) and * output parameter (ABI version implemented by the kernel). - * A client must not fill in an %FW_CDEV_VERSION defined from an - * included kernel header file but the actual version for which - * the client was implemented. This is necessary for forward - * compatibility. We never break backwards compatibility, but - * may add more structs, events, and ioctls in later revisions. - * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration + * A client shall fill in the ABI @version for which the client + * was implemented. This is necessary for forward compatibility. + * @rom_length: If @rom is non-zero, up to @rom_length bytes of Configuration * ROM will be copied into that user space address. In either * case, @rom_length is updated with the actual length of the - * configuration ROM. + * Configuration ROM. * @rom: If non-zero, address of a buffer to be filled by a copy of the - * device's configuration ROM + * device's Configuration ROM * @bus_reset: If non-zero, address of a buffer to be filled by a * &struct fw_cdev_event_bus_reset with the current state * of the bus. This does not cause a bus reset to happen. * @bus_reset_closure: Value of &closure in this and subsequent bus reset events * @card: The index of the card this device belongs to + * + * The %FW_CDEV_IOC_GET_INFO ioctl is usually the very first one which a client + * performs right after it opened a /dev/fw* file. + * + * As a side effect, reception of %FW_CDEV_EVENT_BUS_RESET events to be read(2) + * is started by this ioctl. */ struct fw_cdev_get_info { __u32 version; @@ -612,7 +604,7 @@ struct fw_cdev_initiate_bus_reset { * @handle: Handle to the descriptor, written by the kernel * * Add a descriptor block and optionally a preceding immediate key to the local - * node's configuration ROM. + * node's Configuration ROM. * * The @key field specifies the upper 8 bits of the descriptor root directory * pointer and the @data and @length fields specify the contents. The @key @@ -627,9 +619,9 @@ struct fw_cdev_initiate_bus_reset { * If successful, the kernel adds the descriptor and writes back a @handle to * the kernel-side object to be used for later removal of the descriptor block * and immediate key. 
The kernel will also generate a bus reset to signal the - * change of the configuration ROM to other nodes. + * change of the Configuration ROM to other nodes. * - * This ioctl affects the configuration ROMs of all local nodes. + * This ioctl affects the Configuration ROMs of all local nodes. * The ioctl only succeeds on device files which represent a local node. */ struct fw_cdev_add_descriptor { @@ -641,13 +633,13 @@ struct fw_cdev_add_descriptor { }; /** - * struct fw_cdev_remove_descriptor - Remove contents from the configuration ROM + * struct fw_cdev_remove_descriptor - Remove contents from the Configuration ROM * @handle: Handle to the descriptor, as returned by the kernel when the * descriptor was added * * Remove a descriptor block and accompanying immediate key from the local - * nodes' configuration ROMs. The kernel will also generate a bus reset to - * signal the change of the configuration ROM to other nodes. + * nodes' Configuration ROMs. The kernel will also generate a bus reset to + * signal the change of the Configuration ROM to other nodes. */ struct fw_cdev_remove_descriptor { __u32 handle; @@ -863,13 +855,8 @@ struct fw_cdev_stop_iso { * @local_time: system time, in microseconds since the Epoch * @cycle_timer: Cycle Time register contents * - * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer - * and also the system clock (%CLOCK_REALTIME). This allows to express the - * receive time of an isochronous packet as a system time. - * - * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and - * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register - * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394. + * Same as %FW_CDEV_IOC_GET_CYCLE_TIMER2, but fixed to use %CLOCK_REALTIME + * and only with microseconds resolution. * * In version 1 and 2 of the ABI, this ioctl returned unreliable (non- * monotonic) @cycle_timer values on certain controllers. @@ -886,10 +873,17 @@ struct fw_cdev_get_cycle_timer { * @clk_id: input parameter, clock from which to get the system time * @cycle_timer: Cycle Time register contents * - * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 works like - * %FW_CDEV_IOC_GET_CYCLE_TIMER but lets you choose a clock like with POSIX' - * clock_gettime function. Supported @clk_id values are POSIX' %CLOCK_REALTIME - * and %CLOCK_MONOTONIC and Linux' %CLOCK_MONOTONIC_RAW. + * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 ioctl reads the isochronous cycle timer + * and also the system clock. This allows to correlate reception time of + * isochronous packets with system time. + * + * @clk_id lets you choose a clock like with POSIX' clock_gettime function. + * Supported @clk_id values are POSIX' %CLOCK_REALTIME and %CLOCK_MONOTONIC + * and Linux' %CLOCK_MONOTONIC_RAW. + * + * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and + * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register + * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394. */ struct fw_cdev_get_cycle_timer2 { __s64 tv_sec; @@ -1011,4 +1005,6 @@ struct fw_cdev_receive_phy_packets { __u64 closure; }; +#define FW_CDEV_VERSION 3 /* Meaningless legacy macro; don't use it. 
*/ + #endif /* _LINUX_FIREWIRE_CDEV_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index b5b97924786..0c35d6e767d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -32,7 +32,9 @@ #define SEEK_SET 0 /* seek relative to beginning of file */ #define SEEK_CUR 1 /* seek relative to current file position */ #define SEEK_END 2 /* seek relative to end of file */ -#define SEEK_MAX SEEK_END +#define SEEK_DATA 3 /* seek to the next data */ +#define SEEK_HOLE 4 /* seek to the next hole */ +#define SEEK_MAX SEEK_HOLE struct fstrim_range { __u64 start; @@ -63,6 +65,7 @@ struct inodes_stat_t { #define MAY_ACCESS 16 #define MAY_OPEN 32 #define MAY_CHDIR 64 +#define MAY_NOT_BLOCK 128 /* called from RCU mode, don't block */ /* * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond @@ -392,8 +395,9 @@ struct inodes_stat_t { #include <linux/semaphore.h> #include <linux/fiemap.h> #include <linux/rculist_bl.h> +#include <linux/shrinker.h> +#include <linux/atomic.h> -#include <asm/atomic.h> #include <asm/byteorder.h> struct export_operations; @@ -777,7 +781,7 @@ struct inode { struct timespec i_ctime; blkcnt_t i_blocks; unsigned short i_bytes; - struct rw_semaphore i_alloc_sem; + atomic_t i_dio_count; const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock *i_flock; struct address_space *i_mapping; @@ -1396,6 +1400,11 @@ struct super_block { struct list_head s_dentry_lru; /* unused dentry lru */ int s_nr_dentry_unused; /* # of dentry on lru */ + /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */ + spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp; + struct list_head s_inode_lru; /* unused inode lru */ + int s_nr_inodes_unused; /* # of inodes on lru */ + struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; @@ -1438,8 +1447,14 @@ struct super_block { * Saved pool identifier for cleancache (-1 means none) */ int cleancache_poolid; + + struct shrinker s_shrink; /* per-sb shrinker handle */ }; +/* superblock cache pruning functions */ +extern void prune_icache_sb(struct super_block *sb, int nr_to_scan); +extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan); + extern struct timespec current_fs_time(struct super_block *sb); /* @@ -1454,10 +1469,6 @@ enum { #define vfs_check_frozen(sb, level) \ wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) -#define get_fs_excl() atomic_inc(¤t->fs_excl) -#define put_fs_excl() atomic_dec(¤t->fs_excl) -#define has_fs_excl() atomic_read(¤t->fs_excl) - /* * until VFS tracks user namespaces for inodes, just make all files * belong to init_user_ns @@ -1490,7 +1501,6 @@ extern void dentry_unhash(struct dentry *dentry); /* * VFS file helper functions. */ -extern int file_permission(struct file *, int); extern void inode_init_owner(struct inode *inode, const struct inode *dir, mode_t mode); /* @@ -1538,11 +1548,6 @@ struct block_device_operations; #define HAVE_COMPAT_IOCTL 1 #define HAVE_UNLOCKED_IOCTL 1 -/* - * NOTE: - * all file operations except setlease can be called without - * the big kernel lock held in all filesystems. 
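As a sketch of the new SEEK_DATA/SEEK_HOLE whence values added above, a user-space program could enumerate the data extents of a sparse file roughly like this (assuming a libc that exposes the constants, hence the _GNU_SOURCE define; example_list_extents() is an illustrative name):

#define _GNU_SOURCE
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>

static void example_list_extents(int fd)
{
	off_t data = 0, hole;

	/* SEEK_DATA fails with ENXIO once no further data exists. */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: %lld..%lld\n",
		       (long long)data, (long long)hole - 1);
		data = hole;
	}
}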
- */ struct file_operations { struct module *owner; loff_t (*llseek) (struct file *, loff_t, int); @@ -1558,7 +1563,7 @@ struct file_operations { int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, int datasync); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); @@ -1573,13 +1578,11 @@ struct file_operations { loff_t len); }; -#define IPERM_FLAG_RCU 0x0001 - struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); void * (*follow_link) (struct dentry *, struct nameidata *); - int (*permission) (struct inode *, int, unsigned int); - int (*check_acl)(struct inode *, int, unsigned int); + int (*permission) (struct inode *, int); + int (*check_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); void (*put_link) (struct dentry *, struct nameidata *, void *); @@ -1645,6 +1648,8 @@ struct super_operations { ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + int (*nr_cached_objects)(struct super_block *); + void (*free_cached_objects)(struct super_block *, int); }; /* @@ -1693,6 +1698,10 @@ struct super_operations { * set during data writeback, and cleared with a wakeup * on the bit address once it is done. * + * I_REFERENCED Marks the inode as recently references on the LRU list. + * + * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). + * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ #define I_DIRTY_SYNC (1 << 0) @@ -1706,6 +1715,8 @@ struct super_operations { #define __I_SYNC 7 #define I_SYNC (1 << __I_SYNC) #define I_REFERENCED (1 << 8) +#define __I_DIO_WAKEUP 9 +#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP) #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) @@ -1816,7 +1827,6 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; - struct lock_class_key i_alloc_sem_key; }; extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags, @@ -1837,6 +1847,8 @@ void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); +int get_anon_bdev(dev_t *); +void free_anon_bdev(dev_t); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), @@ -2188,16 +2200,38 @@ extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *); extern int inode_permission(struct inode *, int); -extern int generic_permission(struct inode *, int, unsigned int, - int (*check_acl)(struct inode *, int, unsigned int)); +extern int generic_permission(struct inode *, int); static inline bool execute_ok(struct inode *inode) { return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); } -extern int get_write_access(struct inode *); -extern int deny_write_access(struct file *); +/* + * get_write_access() gets write permission for a file. + * put_write_access() releases this write permission. + * This is used for regular files. 
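The ->fsync() prototype above now carries a byte range, and filesystems are expected to flush that range themselves; a minimal sketch for a hypothetical examplefs, assuming the common filemap_write_and_wait_range() plus per-inode metadata pattern:

static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	/* Write out and wait for the requested range of data pages first. */
	ret = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	/* ... flush examplefs-specific metadata for @inode here ... */
	mutex_unlock(&inode->i_mutex);
	return ret;
}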
+ * We cannot support write (and maybe mmap read-write shared) accesses and + * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode + * can have the following values: + * 0: no writers, no VM_DENYWRITE mappings + * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist + * > 0: (i_writecount) users are writing to the file. + * + * Normally we operate on that counter with atomic_{inc,dec} and it's safe + * except for the cases where we don't hold i_writecount yet. Then we need to + * use {get,deny}_write_access() - these functions check the sign and refuse + * to do the change if sign is wrong. + */ +static inline int get_write_access(struct inode *inode) +{ + return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; +} +static inline int deny_write_access(struct file *file) +{ + struct inode *inode = file->f_path.dentry->d_inode; + return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY; +} static inline void put_write_access(struct inode * inode) { atomic_dec(&inode->i_writecount); @@ -2317,7 +2351,8 @@ extern int generic_segment_checks(const struct iovec *iov, /* fs/block_dev.c */ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); -extern int blkdev_fsync(struct file *filp, int datasync); +extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, @@ -2368,6 +2403,8 @@ enum { }; void dio_end_io(struct bio *bio, int error); +void inode_dio_wait(struct inode *inode); +void inode_dio_done(struct inode *inode); ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, @@ -2375,14 +2412,17 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, dio_submit_t submit_io, int flags); static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, - struct inode *inode, struct block_device *bdev, const struct iovec *iov, - loff_t offset, unsigned long nr_segs, get_block_t get_block, - dio_iodone_t end_io) + struct inode *inode, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block) { - return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_block, end_io, NULL, + return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, + offset, nr_segs, get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); } +#else +static inline void inode_dio_wait(struct inode *inode) +{ +} #endif extern const struct file_operations generic_ro_fops; @@ -2432,6 +2472,8 @@ extern struct super_block *get_active_super(struct block_device *bdev); extern struct super_block *user_get_super(dev_t); extern void drop_super(struct super_block *sb); extern void iterate_supers(void (*)(struct super_block *, void *), void *); +extern void iterate_supers_type(struct file_system_type *, + void (*)(struct super_block *, void *), void *); extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); @@ -2444,7 +2486,7 @@ extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); -extern int noop_fsync(struct file *, int); +extern int noop_fsync(struct file 
*, loff_t, loff_t, int); extern int simple_empty(struct dentry *); extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, @@ -2469,7 +2511,7 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count); -extern int generic_file_fsync(struct file *, int); +extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9d88e1cb5db..f0c0e8a47ae 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -19,6 +19,8 @@ #include <asm/ftrace.h> +struct ftrace_hash; + #ifdef CONFIG_FUNCTION_TRACER extern int ftrace_enabled; @@ -29,8 +31,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); -struct ftrace_hash; - enum { FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_GLOBAL = 1 << 1, @@ -123,7 +123,8 @@ stack_trace_sysctl(struct ctl_table *table, int write, struct ftrace_func_command { struct list_head list; char *name; - int (*func)(char *func, char *cmd, + int (*func)(struct ftrace_hash *hash, + char *func, char *cmd, char *params, int enable); }; diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 59d3ef100eb..96efa6794ea 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -76,6 +76,7 @@ struct trace_iterator { struct trace_entry *ent; unsigned long lost_events; int leftover; + int ent_size; int cpu; u64 ts; @@ -129,6 +130,10 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc); +void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags, int pc, + struct pt_regs *regs); void trace_current_buffer_discard_commit(struct ring_buffer *buffer, struct ring_buffer_event *event); diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h index 0437e377b55..574bea4013b 100644 --- a/include/linux/generic_acl.h +++ b/include/linux/generic_acl.h @@ -10,6 +10,6 @@ extern const struct xattr_handler generic_acl_default_handler; int generic_acl_init(struct inode *, struct inode *); int generic_acl_chmod(struct inode *); -int generic_check_acl(struct inode *inode, int mask, unsigned int flags); +int generic_check_acl(struct inode *inode, int mask); #endif /* LINUX_GENERIC_ACL_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 300d7582006..02fa4697a0e 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -420,7 +420,7 @@ static inline int get_disk_ro(struct gendisk *disk) extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); -extern void disk_check_events(struct gendisk *disk); +extern void disk_flush_events(struct gendisk *disk, unsigned int mask); extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask); /* drivers/char/random.c */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index ba362171e8a..f743883f769 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -93,7 +93,7 @@ */ #define in_nmi() (preempt_count() & NMI_MASK) -#if defined(CONFIG_PREEMPT) +#if 
defined(CONFIG_PREEMPT_COUNT) # define PREEMPT_CHECK_OFFSET 1 #else # define PREEMPT_CHECK_OFFSET 0 @@ -115,7 +115,7 @@ #define in_atomic_preempt_off() \ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT # define preemptible() (preempt_count() == 0 && !irqs_disabled()) # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) #else diff --git a/include/linux/hid.h b/include/linux/hid.h index 42f7e2fb501..9cf8e7ae745 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -453,7 +453,8 @@ struct hid_input { enum hid_type { HID_TYPE_OTHER = 0, - HID_TYPE_USBMOUSE + HID_TYPE_USBMOUSE, + HID_TYPE_USBNONE }; struct hid_driver; diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index d1e55fed2c7..6ae9c631a1b 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -73,6 +73,7 @@ static inline unsigned long hw_breakpoint_len(struct perf_event *bp) extern struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, + void *context, struct task_struct *tsk); /* FIXME: only change from the attr, and don't unregister */ @@ -85,11 +86,13 @@ modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr); extern struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, + void *context, int cpu); extern struct perf_event * __percpu * register_wide_hw_breakpoint(struct perf_event_attr *attr, - perf_overflow_handler_t triggered); + perf_overflow_handler_t triggered, + void *context); extern int register_perf_hw_breakpoint(struct perf_event *bp); extern int __register_perf_hw_breakpoint(struct perf_event *bp); @@ -115,6 +118,7 @@ static inline int __init init_hw_breakpoint(void) { return 0; } static inline struct perf_event * register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, + void *context, struct task_struct *tsk) { return NULL; } static inline int modify_user_hw_breakpoint(struct perf_event *bp, @@ -122,10 +126,12 @@ modify_user_hw_breakpoint(struct perf_event *bp, static inline struct perf_event * register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, perf_overflow_handler_t triggered, + void *context, int cpu) { return NULL; } static inline struct perf_event * __percpu * register_wide_hw_breakpoint(struct perf_event_attr *attr, - perf_overflow_handler_t triggered) { return NULL; } + perf_overflow_handler_t triggered, + void *context) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline int diff --git a/include/linux/i8253.h b/include/linux/i8253.h new file mode 100644 index 00000000000..e6bb36a9751 --- /dev/null +++ b/include/linux/i8253.h @@ -0,0 +1,29 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Machine specific IO port address definition for generic. 
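Using the PIT register definitions and the i8253_lock declaration that this new header centralizes (shown just below), a caller could latch and read PIT channel 0 roughly as follows; example_pit_read_ch0() is an illustrative name, and the outb_p()/inb_p() accessors assume x86-style port I/O:

#include <linux/i8253.h>
#include <linux/io.h>

static u16 example_pit_read_ch0(void)
{
	unsigned long flags;
	u16 count;

	raw_spin_lock_irqsave(&i8253_lock, flags);
	outb_p(0x00, PIT_MODE);			/* latch counter 0 */
	count = inb_p(PIT_CH0);			/* low byte */
	count |= inb_p(PIT_CH0) << 8;		/* high byte */
	raw_spin_unlock_irqrestore(&i8253_lock, flags);

	return count;
}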
+ * Written by Osamu Tomita <tomita@cinet.co.jp> + */ +#ifndef __LINUX_I8253_H +#define __LINUX_I8253_H + +#include <linux/param.h> +#include <linux/spinlock.h> +#include <linux/timex.h> + +/* i8253A PIT registers */ +#define PIT_MODE 0x43 +#define PIT_CH0 0x40 +#define PIT_CH2 0x42 + +#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) + +extern raw_spinlock_t i8253_lock; +extern struct clock_event_device i8253_clockevent; +extern void clockevent_i8253_init(bool oneshot); + +extern void setup_pit_timer(void); + +#endif /* __LINUX_I8253_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index bf56b6f7827..54c87896087 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -117,8 +117,19 @@ #define IEEE80211_MAX_MESH_ID_LEN 32 #define IEEE80211_QOS_CTL_LEN 2 -#define IEEE80211_QOS_CTL_TID_MASK 0x000F -#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 +/* 1d tag mask */ +#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 +/* TID mask */ +#define IEEE80211_QOS_CTL_TID_MASK 0x000f +/* EOSP */ +#define IEEE80211_QOS_CTL_EOSP 0x0010 +/* ACK policy */ +#define IEEE80211_QOS_CTL_ACK_POLICY_NORMAL 0x0000 +#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020 +#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040 +#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060 +/* A-MSDU 802.11n */ +#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080 /* U-APSD queue for WMM IEs sent by AP */ #define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7) @@ -1423,9 +1434,6 @@ enum ieee80211_sa_query_action { }; -/* A-MSDU 802.11n */ -#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080 - /* cipher suite selectors */ #define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00 #define WLAN_CIPHER_SUITE_WEP40 0x000FAC01 @@ -1445,6 +1453,43 @@ enum ieee80211_sa_query_action { #define WLAN_PMKID_LEN 16 +/* + * WMM/802.11e Tspec Element + */ +#define IEEE80211_WMM_IE_TSPEC_TID_MASK 0x0F +#define IEEE80211_WMM_IE_TSPEC_TID_SHIFT 1 + +enum ieee80211_tspec_status_code { + IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED = 0, + IEEE80211_TSPEC_STATUS_ADDTS_INVAL_PARAMS = 0x1, +}; + +struct ieee80211_tspec_ie { + u8 element_id; + u8 len; + u8 oui[3]; + u8 oui_type; + u8 oui_subtype; + u8 version; + __le16 tsinfo; + u8 tsinfo_resvd; + __le16 nominal_msdu; + __le16 max_msdu; + __le32 min_service_int; + __le32 max_service_int; + __le32 inactivity_int; + __le32 suspension_int; + __le32 service_start_time; + __le32 min_data_rate; + __le32 mean_data_rate; + __le32 peak_data_rate; + __le32 max_burst_size; + __le32 delay_bound; + __le32 min_phy_rate; + __le16 sba; + __le16 medium_time; +} __packed; + /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 0065ffd3226..a3d99ff6e3b 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -78,10 +78,15 @@ */ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ +#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY 
REGISTERED ID ] */ #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ /* diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index 7b318630139..c1486060f5e 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h @@ -49,6 +49,12 @@ struct sockaddr_ll { #define PACKET_VNET_HDR 15 #define PACKET_TX_TIMESTAMP 16 #define PACKET_TIMESTAMP 17 +#define PACKET_FANOUT 18 + +#define PACKET_FANOUT_HASH 0 +#define PACKET_FANOUT_LB 1 +#define PACKET_FANOUT_CPU 2 +#define PACKET_FANOUT_FLAG_DEFRAG 0x8000 struct tpacket_stats { unsigned int tp_packets; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index affa27380b7..44da4822bca 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -91,25 +91,6 @@ struct vlan_group { struct rcu_head rcu; }; -static inline struct net_device *vlan_group_get_device(struct vlan_group *vg, - u16 vlan_id) -{ - struct net_device **array; - array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; - return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL; -} - -static inline void vlan_group_set_device(struct vlan_group *vg, - u16 vlan_id, - struct net_device *dev) -{ - struct net_device **array; - if (!vg) - return; - array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN]; - array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev; -} - static inline int is_vlan_dev(struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN; @@ -119,35 +100,18 @@ static inline int is_vlan_dev(struct net_device *dev) #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -/* Must be invoked with rcu_read_lock or with RTNL. */ -static inline struct net_device *vlan_find_dev(struct net_device *real_dev, - u16 vlan_id) -{ - struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp); - - if (grp) - return vlan_group_get_device(grp, vlan_id); - - return NULL; -} +extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, + u16 vlan_id); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); -extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, - u16 vlan_tci, int polling); extern bool vlan_do_receive(struct sk_buff **skb); extern struct sk_buff *vlan_untag(struct sk_buff *skb); -extern gro_result_t -vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, - unsigned int vlan_tci, struct sk_buff *skb); -extern gro_result_t -vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, - unsigned int vlan_tci); #else -static inline struct net_device *vlan_find_dev(struct net_device *real_dev, - u16 vlan_id) +static inline struct net_device * +__vlan_find_dev_deep(struct net_device *real_dev, u16 vlan_id) { return NULL; } @@ -164,13 +128,6 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev) return 0; } -static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, - u16 vlan_tci, int polling) -{ - BUG(); - return NET_XMIT_SUCCESS; -} - static inline bool vlan_do_receive(struct sk_buff **skb) { if ((*skb)->vlan_tci & VLAN_VID_MASK) @@ -182,49 +139,9 @@ static inline struct sk_buff *vlan_untag(struct sk_buff *skb) { return skb; } - -static inline gro_result_t -vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, - unsigned int vlan_tci, struct sk_buff *skb) -{ - return GRO_DROP; -} - -static inline gro_result_t 
-vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, - unsigned int vlan_tci) -{ - return GRO_DROP; -} #endif /** - * vlan_hwaccel_rx - netif_rx wrapper for VLAN RX acceleration - * @skb: buffer - * @grp: vlan group - * @vlan_tci: VLAN TCI as received from the card - */ -static inline int vlan_hwaccel_rx(struct sk_buff *skb, - struct vlan_group *grp, - u16 vlan_tci) -{ - return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0); -} - -/** - * vlan_hwaccel_receive_skb - netif_receive_skb wrapper for VLAN RX acceleration - * @skb: buffer - * @grp: vlan group - * @vlan_tci: VLAN TCI as received from the card - */ -static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb, - struct vlan_group *grp, - u16 vlan_tci) -{ - return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1); -} - -/** * vlan_insert_tag - regular VLAN tag inserting * @skb: skbuff to tag * @vlan_tci: VLAN TCI to insert diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h index c4335faebb6..2cf55afbcd4 100644 --- a/include/linux/inet_lro.h +++ b/include/linux/inet_lro.h @@ -50,7 +50,6 @@ struct net_lro_desc { struct skb_frag_struct *next_frag; struct iphdr *iph; struct tcphdr *tcph; - struct vlan_group *vgrp; __wsum data_csum; __be32 tcp_rcv_tsecr; __be32 tcp_rcv_tsval; @@ -60,7 +59,6 @@ struct net_lro_desc { u16 ip_tot_len; u16 tcp_saw_tstamp; /* timestamps enabled */ __be16 tcp_window; - u16 vlan_tag; int pkt_aggr_cnt; /* counts aggregated packets */ int vlan_packet; int mss; @@ -137,16 +135,6 @@ void lro_receive_skb(struct net_lro_mgr *lro_mgr, void *priv); /* - * Processes a SKB with VLAN HW acceleration support - */ - -void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr, - struct sk_buff *skb, - struct vlan_group *vgrp, - u16 vlan_tag, - void *priv); - -/* * Processes a fragment list * * This functions aggregate fragments and generate SKBs do pass @@ -165,13 +153,6 @@ void lro_receive_frags(struct net_lro_mgr *lro_mgr, struct skb_frag_struct *frags, int len, int true_size, void *priv, __wsum sum); -void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr, - struct skb_frag_struct *frags, - int len, int true_size, - struct vlan_group *vgrp, - u16 vlan_tag, - void *priv, __wsum sum); - /* * Forward all aggregated SKBs held by lro_mgr to network stack */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 580f70c0239..d14e058aaee 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -176,7 +176,6 @@ extern struct cred init_cred; .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ .journal_info = NULL, \ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ - .fs_excl = ATOMIC_INIT(0), \ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ .pids = { \ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index b2eee896dcb..5037a0ad231 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -5,6 +5,14 @@ #include <linux/rcupdate.h> struct cfq_queue; +struct cfq_ttime { + unsigned long last_end_request; + + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; +}; + struct cfq_io_context { void *key; @@ -12,11 +20,7 @@ struct cfq_io_context { struct io_context *ioc; - unsigned long last_end_request; - - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; + struct cfq_ttime ttime; struct list_head queue_list; struct hlist_node cic_list; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 
0a2ba409899..9940319d6f9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -19,6 +19,8 @@ #ifndef __LINUX_IOMMU_H #define __LINUX_IOMMU_H +#include <linux/errno.h> + #define IOMMU_READ (1) #define IOMMU_WRITE (2) #define IOMMU_CACHE (4) /* DMA cache coherency */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 8b453844663..5f695041090 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -96,11 +96,6 @@ enum { #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) -static inline __deprecated bool CHECK_IRQ_PER_CPU(unsigned int status) -{ - return status & IRQ_PER_CPU; -} - /* * Return value for chip->irq_set_affinity() * @@ -676,7 +671,8 @@ void irq_gc_mask_disable_reg(struct irq_data *d); void irq_gc_mask_set_bit(struct irq_data *d); void irq_gc_mask_clr_bit(struct irq_data *d); void irq_gc_unmask_enable_reg(struct irq_data *d); -void irq_gc_ack(struct irq_data *d); +void irq_gc_ack_set_bit(struct irq_data *d); +void irq_gc_ack_clr_bit(struct irq_data *d); void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); void irq_gc_eoi(struct irq_data *d); int irq_gc_set_wake(struct irq_data *d, unsigned int on); diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h index f1e6c184f14..f0a2f8b0aa1 100644 --- a/include/linux/iscsi_boot_sysfs.h +++ b/include/linux/iscsi_boot_sysfs.h @@ -92,6 +92,13 @@ struct iscsi_boot_kobj { * properties. */ mode_t (*is_visible) (void *data, int type); + + /* + * Driver specific release function. + * + * The function should free the data passed in. + */ + void (*release) (void *data); }; struct iscsi_boot_kset { @@ -103,18 +110,21 @@ struct iscsi_boot_kobj * iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)); + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)); + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), - mode_t (*is_visible) (void *data, int type)); + mode_t (*is_visible) (void *data, int type), + void (*release) (void *data)); struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 953352a8833..567a6f7bbee 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -121,7 +121,7 @@ extern int _cond_resched(void); # define might_resched() do { } while (0) #endif -#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP void __might_sleep(const char *file, int line, int preempt_offset); /** * might_sleep - annotation for functions that can sleep diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 55ef181521f..2c366b52f50 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -161,6 +161,7 @@ struct kvm_pit_config { #define KVM_EXIT_NMI 16 #define KVM_EXIT_INTERNAL_ERROR 17 #define KVM_EXIT_OSI 18 +#define KVM_EXIT_PAPR_HCALL 19 /* For KVM_EXIT_INTERNAL_ERROR */ #define KVM_INTERNAL_ERROR_EMULATION 1 @@ -264,6 +265,11 @@ struct kvm_run { struct { __u64 
gprs[32]; } osi; + struct { + __u64 nr; + __u64 ret; + __u64 args[9]; + } papr_hcall; /* Fix the size of the union. */ char padding[256]; }; @@ -544,6 +550,9 @@ struct kvm_ppc_pvinfo { #define KVM_CAP_TSC_CONTROL 60 #define KVM_CAP_GET_TSC_KHZ 61 #define KVM_CAP_PPC_BOOKE_SREGS 62 +#define KVM_CAP_SPAPR_TCE 63 +#define KVM_CAP_PPC_SMT 64 +#define KVM_CAP_PPC_RMA 65 #ifdef KVM_CAP_IRQ_ROUTING @@ -746,6 +755,9 @@ struct kvm_clock_data { /* Available with KVM_CAP_XCRS */ #define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs) #define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs) +#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce) +/* Available with KVM_CAP_RMA */ +#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma) #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) @@ -773,20 +785,14 @@ struct kvm_assigned_pci_dev { struct kvm_assigned_irq { __u32 assigned_dev_id; - __u32 host_irq; + __u32 host_irq; /* ignored (legacy field) */ __u32 guest_irq; __u32 flags; union { - struct { - __u32 addr_lo; - __u32 addr_hi; - __u32 data; - } guest_msi; __u32 reserved[12]; }; }; - struct kvm_assigned_msix_nr { __u32 assigned_dev_id; __u16 entry_nr; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 31ebb59cbd2..eabb21a30c3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -47,6 +47,7 @@ #define KVM_REQ_DEACTIVATE_FPU 10 #define KVM_REQ_EVENT 11 #define KVM_REQ_APF_HALT 12 +#define KVM_REQ_STEAL_UPDATE 13 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 @@ -326,12 +327,17 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } extern struct page *bad_page; +extern struct page *fault_page; + extern pfn_t bad_pfn; +extern pfn_t fault_pfn; int is_error_page(struct page *page); int is_error_pfn(pfn_t pfn); int is_hwpoison_pfn(pfn_t pfn); int is_fault_pfn(pfn_t pfn); +int is_noslot_pfn(pfn_t pfn); +int is_invalid_pfn(pfn_t pfn); int kvm_is_error_hva(unsigned long addr); int kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, @@ -381,6 +387,8 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); +int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, diff --git a/include/linux/lguest.h b/include/linux/lguest.h index 2fb1dcbcb5a..9962c6bb131 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h @@ -59,8 +59,6 @@ struct lguest_data { unsigned long reserve_mem; /* KHz for the TSC clock. */ u32 tsc_khz; - /* Page where the top-level pagetable is */ - unsigned long pgdir; /* Fields initialized by the Guest at boot: */ /* Instruction range to suppress interrupts even if enabled */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 5a9926b3407..efd6f980076 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -74,6 +74,16 @@ #define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#define ata_print_version_once(dev, version) \ +({ \ + static bool __print_once; \ + \ + if (!__print_once) { \ + __print_once = true; \ + ata_print_version(dev, version); \ + } \ +}) + /* NEW: debug levels */ #define HAVE_LIBATA_MSG 1 @@ -1244,20 +1254,50 @@ static inline int sata_srst_pmp(struct ata_link *link) /* * printk helpers */ -#define ata_port_printk(ap, lv, fmt, args...) \ - printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) - -#define ata_link_printk(link, lv, fmt, args...) do { \ - if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \ - printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ - (link)->pmp , ##args); \ - else \ - printk("%sata%u: "fmt, lv, (link)->ap->print_id , ##args); \ - } while(0) - -#define ata_dev_printk(dev, lv, fmt, args...) \ - printk("%sata%u.%02u: "fmt, lv, (dev)->link->ap->print_id, \ - (dev)->link->pmp + (dev)->devno , ##args) +__attribute__((format (printf, 3, 4))) +int ata_port_printk(const struct ata_port *ap, const char *level, + const char *fmt, ...); +__attribute__((format (printf, 3, 4))) +int ata_link_printk(const struct ata_link *link, const char *level, + const char *fmt, ...); +__attribute__((format (printf, 3, 4))) +int ata_dev_printk(const struct ata_device *dev, const char *level, + const char *fmt, ...); + +#define ata_port_err(ap, fmt, ...) \ + ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_port_warn(ap, fmt, ...) \ + ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_port_notice(ap, fmt, ...) \ + ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_port_info(ap, fmt, ...) \ + ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_port_dbg(ap, fmt, ...) \ + ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__) + +#define ata_link_err(link, fmt, ...) \ + ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_link_warn(link, fmt, ...) \ + ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_link_notice(link, fmt, ...) \ + ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_link_info(link, fmt, ...) \ + ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_link_dbg(link, fmt, ...) \ + ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__) + +#define ata_dev_err(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__) +#define ata_dev_warn(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__) +#define ata_dev_notice(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__) +#define ata_dev_info(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) +#define ata_dev_dbg(dev, fmt, ...) \ + ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__) + +void ata_print_version(const struct device *dev, const char *version); /* * ata_eh_info helpers diff --git a/include/linux/memory.h b/include/linux/memory.h index e1e3b2b84f8..935699b30b7 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -20,6 +20,8 @@ #include <linux/compiler.h> #include <linux/mutex.h> +#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) + struct memory_block { unsigned long start_section_nr; unsigned long end_section_nr; diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 5a90266c3a5..0dc98044d8b 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -68,6 +68,11 @@ * controller and report the event to the driver. 
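Referring back to the libata message helpers introduced above, a driver that previously open-coded ata_dev_printk()/ata_link_printk() with explicit KERN_* levels can now write something like the following (example_report_status() is a hypothetical driver function):

static void example_report_status(struct ata_device *dev, unsigned int err_mask)
{
	if (err_mask)
		ata_dev_err(dev, "command failed (err_mask=0x%x)\n", err_mask);
	else
		ata_link_info(dev->link, "device ready\n");
}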
*/ #define TMIO_MMC_HAS_COLD_CD (1 << 3) +/* + * Some controllers require waiting for the SD bus to become + * idle before writing to some registers. + */ +#define TMIO_MMC_HAS_IDLE_WAIT (1 << 4) int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); @@ -80,6 +85,8 @@ struct tmio_mmc_dma { int alignment_shift; }; +struct tmio_mmc_host; + /* * data for the MMC controller */ @@ -94,6 +101,7 @@ struct tmio_mmc_data { void (*set_pwr)(struct platform_device *host, int state); void (*set_clk_div)(struct platform_device *host, int state); int (*get_cd)(struct platform_device *host); + int (*write16_hook)(struct tmio_mmc_host *host, int addr); }; static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata) diff --git a/include/linux/mii.h b/include/linux/mii.h index 359fba88027..103113a2fd1 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -128,6 +128,8 @@ /* 1000BASE-T Control register */ #define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ #define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 /* 1000BASE-T Status register */ #define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 9a18667c13c..b56e4587208 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -123,6 +123,9 @@ enum { /* debug commands */ MLX4_CMD_QUERY_DEBUG_MSG = 0x2a, MLX4_CMD_SET_DEBUG_MSG = 0x2b, + + /* statistics commands */ + MLX4_CMD_QUERY_IF_STAT = 0X54, }; enum { diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 8985768e2c0..387329e0230 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -58,22 +58,28 @@ enum { }; enum { - MLX4_DEV_CAP_FLAG_RC = 1 << 0, - MLX4_DEV_CAP_FLAG_UC = 1 << 1, - MLX4_DEV_CAP_FLAG_UD = 1 << 2, - MLX4_DEV_CAP_FLAG_SRQ = 1 << 6, - MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, - MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, - MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, - MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, - MLX4_DEV_CAP_FLAG_BLH = 1 << 15, - MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, - MLX4_DEV_CAP_FLAG_APM = 1 << 17, - MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, - MLX4_DEV_CAP_FLAG_RAW_MCAST = 1 << 19, - MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1 << 20, - MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21, - MLX4_DEV_CAP_FLAG_IBOE = 1 << 30 + MLX4_DEV_CAP_FLAG_RC = 1LL << 0, + MLX4_DEV_CAP_FLAG_UC = 1LL << 1, + MLX4_DEV_CAP_FLAG_UD = 1LL << 2, + MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6, + MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7, + MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12, + MLX4_DEV_CAP_FLAG_BLH = 1LL << 15, + MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16, + MLX4_DEV_CAP_FLAG_APM = 1LL << 17, + MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19, + MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20, + MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21, + MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30, + MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32, + MLX4_DEV_CAP_FLAG_WOL = 1LL << 38, + MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40, + MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, + MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, + MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48 }; enum { @@ -253,15 +259,10 @@ struct mlx4_caps { int mtt_entry_sz; u32 max_msg_sz; u32 page_size_cap; - u32 flags; + u64 flags; u32 bmme_flags; u32 
reserved_lkey; u16 stat_rate_support; - int udp_rss; - int loopback_support; - int vep_uc_steering; - int vep_mc_steering; - int wol; u8 port_width_cap[MLX4_MAX_PORTS + 1]; int max_gso_sz; int reserved_qps_cnt[MLX4_NUM_QP_REGION]; @@ -274,6 +275,7 @@ struct mlx4_caps { u8 supported_type[MLX4_MAX_PORTS + 1]; u32 port_mask; enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; + u32 max_counters; }; struct mlx4_buf_list { @@ -438,6 +440,17 @@ union mlx4_ext_av { struct mlx4_eth_av eth; }; +struct mlx4_counter { + u8 reserved1[3]; + u8 counter_mode; + __be32 num_ifc; + u32 reserved2[2]; + __be64 rx_frames; + __be64 rx_bytes; + __be64 tx_frames; + __be64 tx_bytes; +}; + struct mlx4_dev { struct pci_dev *pdev; unsigned long flags; @@ -568,4 +581,7 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec); int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); +int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); +void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); + #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 9e9eb21056c..4001c8249db 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -54,7 +54,8 @@ enum mlx4_qp_optpar { MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12, MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13, MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, - MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16 + MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16, + MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20 }; enum mlx4_qp_state { @@ -99,7 +100,7 @@ struct mlx4_qp_path { u8 fl; u8 reserved1[2]; u8 pkey_index; - u8 reserved2; + u8 counter_index; u8 grh_mylmc; __be16 rlid; u8 ackto; @@ -111,8 +112,7 @@ struct mlx4_qp_path { u8 sched_queue; u8 vlan_index; u8 reserved3[2]; - u8 counter_index; - u8 reserved4; + u8 reserved4[2]; u8 dmac[6]; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 9670f71d7be..8a45ad22a17 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -15,6 +15,7 @@ #include <linux/range.h> #include <linux/pfn.h> #include <linux/bit_spinlock.h> +#include <linux/shrinker.h> struct mempolicy; struct anon_vma; @@ -1121,44 +1122,6 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) } #endif -/* - * This struct is used to pass information from page reclaim to the shrinkers. - * We consolidate the values for easier extention later. - */ -struct shrink_control { - gfp_t gfp_mask; - - /* How many slab objects shrinker() should scan and try to reclaim */ - unsigned long nr_to_scan; -}; - -/* - * A callback you can register to apply pressure to ageable caches. - * - * 'sc' is passed shrink_control which includes a count 'nr_to_scan' - * and a 'gfpmask'. It should look through the least-recently-used - * 'nr_to_scan' entries and attempt to free them up. It should return - * the number of objects which remain in the cache. If it returns -1, it means - * it cannot do any scanning at this time (eg. there is a risk of deadlock). - * - * The 'gfpmask' refers to the allocation we are currently trying to - * fulfil. - * - * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is - * querying the cache size, so a fastpath for that case is appropriate. - */ -struct shrinker { - int (*shrink)(struct shrinker *, struct shrink_control *sc); - int seeks; /* seeks to recreate an obj */ - - /* These are for internal use */ - struct list_head list; - long nr; /* objs pending delete */ -}; -#define DEFAULT_SEEKS 2 /* A good number if you don't know better. 
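The shrinker interface being removed here moves to <linux/shrinker.h> unchanged, so a cache owner still registers it the same way. In this sketch, examplefs_nr_cached() and examplefs_prune() are hypothetical helpers, with prune() returning the number of objects left in the cache as the callback contract above requires:

#include <linux/gfp.h>
#include <linux/shrinker.h>

extern int examplefs_nr_cached(void);		/* hypothetical helper */
extern int examplefs_prune(int nr_to_scan);	/* hypothetical helper */

static int examplefs_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return -1;			/* cannot scan safely right now */
	if (sc->nr_to_scan == 0)
		return examplefs_nr_cached();	/* fast path: size query only */

	return examplefs_prune(sc->nr_to_scan);
}

static struct shrinker examplefs_shrinker = {
	.shrink	= examplefs_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&examplefs_shrinker) at mount time,
 * unregister_shrinker(&examplefs_shrinker) at unmount. */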
*/ -extern void register_shrinker(struct shrinker *); -extern void unregister_shrinker(struct shrinker *); - int vma_wants_writenotify(struct vm_area_struct *vma); extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, @@ -1313,6 +1276,7 @@ extern void remove_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); extern void remove_all_active_ranges(void); void sort_node_map(void); +unsigned long node_map_pfn_alignment(void); unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn); extern unsigned long absent_pages_in_range(unsigned long start_pfn, diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h index 39d787c229c..23acc3baa07 100644 --- a/include/linux/mmc/boot.h +++ b/include/linux/mmc/boot.h @@ -1,7 +1,7 @@ -#ifndef MMC_BOOT_H -#define MMC_BOOT_H +#ifndef LINUX_MMC_BOOT_H +#define LINUX_MMC_BOOT_H enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT, MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE }; -#endif +#endif /* LINUX_MMC_BOOT_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index c6927a4d157..b460fc2af8a 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -64,6 +64,19 @@ struct mmc_ext_csd { unsigned long long enhanced_area_offset; /* Units: Byte */ unsigned int enhanced_area_size; /* Units: KB */ unsigned int boot_size; /* in bytes */ + u8 raw_partition_support; /* 160 */ + u8 raw_erased_mem_count; /* 181 */ + u8 raw_ext_csd_structure; /* 194 */ + u8 raw_card_type; /* 196 */ + u8 raw_s_a_timeout; /* 217 */ + u8 raw_hc_erase_gap_size; /* 221 */ + u8 raw_erase_timeout_mult; /* 223 */ + u8 raw_hc_erase_grp_size; /* 224 */ + u8 raw_sec_trim_mult; /* 229 */ + u8 raw_sec_erase_mult; /* 230 */ + u8 raw_sec_feature_support;/* 231 */ + u8 raw_trim_mult; /* 232 */ + u8 raw_sectors[4]; /* 212 - 4 bytes */ }; struct sd_scr { @@ -390,4 +403,4 @@ extern void mmc_unregister_driver(struct mmc_driver *); extern void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table); -#endif +#endif /* LINUX_MMC_CARD_H */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index b6718e549a5..b8b1b7a311f 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -117,6 +117,7 @@ struct mmc_data { unsigned int sg_len; /* size of scatter list */ struct scatterlist *sg; /* I/O scatter list */ + s32 host_cookie; /* host private data */ }; struct mmc_request { @@ -125,13 +126,16 @@ struct mmc_request { struct mmc_data *data; struct mmc_command *stop; - void *done_data; /* completion data */ + struct completion completion; void (*done)(struct mmc_request *);/* completion function */ }; struct mmc_host; struct mmc_card; +struct mmc_async_req; +extern struct mmc_async_req *mmc_start_req(struct mmc_host *, + struct mmc_async_req *, int *); extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *); extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *); @@ -155,6 +159,7 @@ extern int mmc_can_trim(struct mmc_card *card); extern int mmc_can_secure_erase_trim(struct mmc_card *card); extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, unsigned int nr); +extern unsigned int mmc_calc_max_discard(struct mmc_card *card); extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen); @@ -179,4 +184,4 @@ static inline void mmc_claim_host(struct mmc_host *host) extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); -#endif +#endif /* 
LINUX_MMC_CORE_H */ diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h index bdd7ceeb99e..6b46819705d 100644 --- a/include/linux/mmc/dw_mmc.h +++ b/include/linux/mmc/dw_mmc.h @@ -11,8 +11,8 @@ * (at your option) any later version. */ -#ifndef _LINUX_MMC_DW_MMC_H_ -#define _LINUX_MMC_DW_MMC_H_ +#ifndef LINUX_MMC_DW_MMC_H +#define LINUX_MMC_DW_MMC_H #define MAX_MCI_SLOTS 2 @@ -48,6 +48,7 @@ struct mmc_data; * @data: The data currently being transferred, or NULL if no data * transfer is in progress. * @use_dma: Whether DMA channel is initialized or not. + * @using_dma: Whether DMA is in use for the current transfer. * @sg_dma: Bus address of DMA buffer. * @sg_cpu: Virtual address of DMA buffer. * @dma_ops: Pointer to platform-specific DMA callbacks. @@ -74,7 +75,11 @@ struct mmc_data; * @pdev: Platform device associated with the MMC controller. * @pdata: Platform data associated with the MMC controller. * @slot: Slots sharing this MMC controller. + * @fifo_depth: depth of FIFO. * @data_shift: log2 of FIFO item size. + * @part_buf_start: Start index in part_buf. + * @part_buf_count: Bytes of partial data in part_buf. + * @part_buf: Simple buffer for partial fifo reads/writes. * @push_data: Pointer to FIFO push function. * @pull_data: Pointer to FIFO pull function. * @quirks: Set of quirks that apply to specific versions of the IP. @@ -117,6 +122,7 @@ struct dw_mci { /* DMA interface members*/ int use_dma; + int using_dma; dma_addr_t sg_dma; void *sg_cpu; @@ -131,7 +137,7 @@ struct dw_mci { u32 stop_cmdr; u32 dir_status; struct tasklet_struct tasklet; - struct tasklet_struct card_tasklet; + struct work_struct card_work; unsigned long pending_events; unsigned long completed_events; enum dw_mci_state state; @@ -146,7 +152,15 @@ struct dw_mci { struct dw_mci_slot *slot[MAX_MCI_SLOTS]; /* FIFO push and pull */ + int fifo_depth; int data_shift; + u8 part_buf_start; + u8 part_buf_count; + union { + u16 part_buf16; + u32 part_buf32; + u64 part_buf; + }; void (*push_data)(struct dw_mci *host, void *buf, int cnt); void (*pull_data)(struct dw_mci *host, void *buf, int cnt); @@ -196,6 +210,12 @@ struct dw_mci_board { unsigned int bus_hz; /* Bus speed */ unsigned int caps; /* Capabilities */ + /* + * Override fifo depth. If 0, autodetect it from the FIFOTH register, + * but note that this may not be reliable after a bootloader has used + * it. + */ + unsigned int fifo_depth; /* delay in mS before detecting cards after interrupt */ u32 detect_delay_ms; @@ -219,4 +239,4 @@ struct dw_mci_board { struct block_settings *blk_settings; }; -#endif /* _LINUX_MMC_DW_MMC_H_ */ +#endif /* LINUX_MMC_DW_MMC_H */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 1ee4424462e..0f83858147a 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -106,6 +106,15 @@ struct mmc_host_ops { */ int (*enable)(struct mmc_host *host); int (*disable)(struct mmc_host *host, int lazy); + /* + * It is optional for the host to implement pre_req and post_req in + * order to support double buffering of requests (prepare one + * request while another request is active). 
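The pre_req/post_req hooks documented here, together with mmc_start_req() declared in core.h above, enable the double-buffering pattern sketched below; struct example_mmc_rq, example_prep_areq() and example_complete() are hypothetical block-layer helpers that build and consume a struct mmc_async_req:

struct example_mmc_rq;					/* hypothetical */
extern struct mmc_async_req *example_prep_areq(struct example_mmc_rq *);
extern void example_complete(struct mmc_async_req *);

static int example_issue_rw(struct mmc_card *card, struct example_mmc_rq *next)
{
	struct mmc_async_req *prev_done;
	int err;

	/*
	 * Hand the prepared next request to the core; it returns the
	 * previously active request once that one has completed, so the
	 * host's pre_req()/post_req() hooks can overlap DMA preparation
	 * with the transfer still in flight.
	 */
	prev_done = mmc_start_req(card->host, example_prep_areq(next), &err);
	if (prev_done)
		example_complete(prev_done);

	return err;
}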
+ */ + void (*post_req)(struct mmc_host *host, struct mmc_request *req, + int err); + void (*pre_req)(struct mmc_host *host, struct mmc_request *req, + bool is_first_req); void (*request)(struct mmc_host *host, struct mmc_request *req); /* * Avoid calling these three functions too often or in a "fast path", @@ -139,11 +148,22 @@ struct mmc_host_ops { int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); int (*execute_tuning)(struct mmc_host *host); void (*enable_preset_value)(struct mmc_host *host, bool enable); + int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); }; struct mmc_card; struct device; +struct mmc_async_req { + /* active mmc request */ + struct mmc_request *mrq; + /* + * Check error status of completed mmc request. + * Returns 0 if success otherwise non zero. + */ + int (*err_check) (struct mmc_card *, struct mmc_async_req *); +}; + struct mmc_host { struct device *parent; struct device class_dev; @@ -231,6 +251,7 @@ struct mmc_host { unsigned int max_req_size; /* maximum number of bytes in one req */ unsigned int max_blk_size; /* maximum size of one mmc block */ unsigned int max_blk_count; /* maximum number of blocks in one req */ + unsigned int max_discard_to; /* max. discard timeout in ms */ /* private data */ spinlock_t lock; /* lock for claim and bus ops */ @@ -281,6 +302,8 @@ struct mmc_host { struct dentry *debugfs_root; + struct mmc_async_req *areq; /* active async req */ + unsigned long private[0] ____cacheline_aligned; }; @@ -373,5 +396,4 @@ static inline int mmc_host_cmd23(struct mmc_host *host) { return host->caps & MMC_CAP_CMD23; } -#endif - +#endif /* LINUX_MMC_HOST_H */ diff --git a/include/linux/mmc/ioctl.h b/include/linux/mmc/ioctl.h index 5baf2983a12..8fa5bc5f805 100644 --- a/include/linux/mmc/ioctl.h +++ b/include/linux/mmc/ioctl.h @@ -51,4 +51,4 @@ struct mmc_ioc_cmd { * block device operations. */ #define MMC_IOC_MAX_BYTES (512L * 256) -#endif /* LINUX_MMC_IOCTL_H */ +#endif /* LINUX_MMC_IOCTL_H */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index ac26a685cca..5a794cb503e 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -21,8 +21,8 @@ * 15 May 2002 */ -#ifndef MMC_MMC_H -#define MMC_MMC_H +#ifndef LINUX_MMC_MMC_H +#define LINUX_MMC_MMC_H /* Standard MMC commands (4.1) type argument response */ /* class 1 */ @@ -140,6 +140,16 @@ static inline bool mmc_op_multi(u32 opcode) #define R1_SWITCH_ERROR (1 << 7) /* sx, c */ #define R1_APP_CMD (1 << 5) /* sr, c */ +#define R1_STATE_IDLE 0 +#define R1_STATE_READY 1 +#define R1_STATE_IDENT 2 +#define R1_STATE_STBY 3 +#define R1_STATE_TRAN 4 +#define R1_STATE_DATA 5 +#define R1_STATE_RCV 6 +#define R1_STATE_PRG 7 +#define R1_STATE_DIS 8 + /* * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS * R1 is the low order byte; R2 is the next highest byte, when present. 
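The new R1_STATE_* values name the CURRENT_STATE field of the card status word, which occupies bits 12:9 of an R1 response; a small decoding sketch (example_card_is_busy() is an illustrative name):

static bool example_card_is_busy(u32 status)
{
	unsigned int state = (status >> 9) & 0xf;	/* CURRENT_STATE, bits 12:9 */

	return state == R1_STATE_PRG || state == R1_STATE_RCV;
}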
@@ -327,5 +337,4 @@ struct _mmc_csd { #define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */ #define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */ -#endif /* MMC_MMC_PROTOCOL_H */ - +#endif /* LINUX_MMC_MMC_H */ diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h index d37aac49cf9..4a139204c20 100644 --- a/include/linux/mmc/pm.h +++ b/include/linux/mmc/pm.h @@ -27,4 +27,4 @@ typedef unsigned int mmc_pm_flag_t; #define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */ #define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */ -#endif +#endif /* LINUX_MMC_PM_H */ diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 7d35d52c3df..1ebcf9ba125 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -9,8 +9,8 @@ * your option) any later version. */ -#ifndef MMC_SD_H -#define MMC_SD_H +#ifndef LINUX_MMC_SD_H +#define LINUX_MMC_SD_H /* SD commands type argument response */ /* class 0 */ @@ -91,5 +91,4 @@ #define SD_SWITCH_ACCESS_DEF 0 #define SD_SWITCH_ACCESS_HS 1 -#endif - +#endif /* LINUX_MMC_SD_H */ diff --git a/include/linux/mmc/sdhci-pltfm.h b/include/linux/mmc/sdhci-pltfm.h deleted file mode 100644 index 548d59d404c..00000000000 --- a/include/linux/mmc/sdhci-pltfm.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Platform data declarations for the sdhci-pltfm driver. - * - * Copyright (c) 2010 MontaVista Software, LLC. - * - * Author: Anton Vorontsov <avorontsov@ru.mvista.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - */ - -#ifndef _SDHCI_PLTFM_H -#define _SDHCI_PLTFM_H - -struct sdhci_ops; -struct sdhci_host; - -/** - * struct sdhci_pltfm_data - SDHCI platform-specific information & hooks - * @ops: optional pointer to the platform-provided SDHCI ops - * @quirks: optional SDHCI quirks - * @init: optional hook that is called during device probe, before the - * driver tries to access any SDHCI registers - * @exit: optional hook that is called during device removal - */ -struct sdhci_pltfm_data { - struct sdhci_ops *ops; - unsigned int quirks; - int (*init)(struct sdhci_host *host, struct sdhci_pltfm_data *pdata); - void (*exit)(struct sdhci_host *host); -}; - -#endif /* _SDHCI_PLTFM_H */ diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h index 9188c973f3e..5cdc96da9dd 100644 --- a/include/linux/mmc/sdhci-spear.h +++ b/include/linux/mmc/sdhci-spear.h @@ -11,8 +11,8 @@ * warranty of any kind, whether express or implied. */ -#ifndef MMC_SDHCI_SPEAR_H -#define MMC_SDHCI_SPEAR_H +#ifndef LINUX_MMC_SDHCI_SPEAR_H +#define LINUX_MMC_SDHCI_SPEAR_H #include <linux/platform_device.h> /* @@ -39,4 +39,4 @@ sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data) pdev->dev.platform_data = data; } -#endif /* MMC_SDHCI_SPEAR_H */ +#endif /* LINUX_MMC_SDHCI_SPEAR_H */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 6a68c4eb4e4..5666f3abfab 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -8,8 +8,8 @@ * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. 
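An SDIO function driver would typically act on the MMC_PM_* capability flags above in its suspend handler, using the sdio_get_host_pm_caps()/sdio_set_host_pm_flags() helpers declared in sdio_func.h further below; example_sdio_suspend() is a hypothetical driver callback:

#include <linux/errno.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);

	if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
		return -ENOSYS;	/* host cannot keep the card powered */

	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}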
*/ -#ifndef __SDHCI_H -#define __SDHCI_H +#ifndef LINUX_MMC_SDHCI_H +#define LINUX_MMC_SDHCI_H #include <linux/scatterlist.h> #include <linux/compiler.h> @@ -162,4 +162,4 @@ struct sdhci_host { unsigned long private[0] ____cacheline_aligned; }; -#endif /* __SDHCI_H */ +#endif /* LINUX_MMC_SDHCI_H */ diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 245cdacee54..2a2e9905a24 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -9,8 +9,8 @@ * your option) any later version. */ -#ifndef MMC_SDIO_H -#define MMC_SDIO_H +#ifndef LINUX_MMC_SDIO_H +#define LINUX_MMC_SDIO_H /* SDIO commands type argument response */ #define SD_IO_SEND_OP_COND 5 /* bcr [23:0] OCR R4 */ @@ -161,5 +161,4 @@ #define SDIO_FBR_BLKSIZE 0x10 /* block size (2 bytes) */ -#endif - +#endif /* LINUX_MMC_SDIO_H */ diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 31baaf82f45..50f0bc95232 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -9,8 +9,8 @@ * your option) any later version. */ -#ifndef MMC_SDIO_FUNC_H -#define MMC_SDIO_FUNC_H +#ifndef LINUX_MMC_SDIO_FUNC_H +#define LINUX_MMC_SDIO_FUNC_H #include <linux/device.h> #include <linux/mod_devicetable.h> @@ -161,5 +161,4 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b, extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func); extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags); -#endif - +#endif /* LINUX_MMC_SDIO_FUNC_H */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index a36ab3bc7b0..9f03feedc8e 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -2,8 +2,8 @@ * SDIO Classes, Interface Types, Manufacturer IDs, etc. */ -#ifndef MMC_SDIO_IDS_H -#define MMC_SDIO_IDS_H +#ifndef LINUX_MMC_SDIO_IDS_H +#define LINUX_MMC_SDIO_IDS_H /* * Standard SDIO Function Interfaces @@ -44,4 +44,4 @@ #define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 #define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 -#endif +#endif /* LINUX_MMC_SDIO_IDS_H */ diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h index 9eb9b4b96f5..0222cd8ebe7 100644 --- a/include/linux/mmc/sh_mmcif.h +++ b/include/linux/mmc/sh_mmcif.h @@ -11,8 +11,8 @@ * */ -#ifndef __SH_MMCIF_H__ -#define __SH_MMCIF_H__ +#ifndef LINUX_MMC_SH_MMCIF_H +#define LINUX_MMC_SH_MMCIF_H #include <linux/io.h> #include <linux/platform_device.h> @@ -220,4 +220,4 @@ static inline void sh_mmcif_boot_init(void __iomem *base) sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); } -#endif /* __SH_MMCIF_H__ */ +#endif /* LINUX_MMC_SH_MMCIF_H */ diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h index faf32b6ec18..bd50b365167 100644 --- a/include/linux/mmc/sh_mobile_sdhi.h +++ b/include/linux/mmc/sh_mobile_sdhi.h @@ -1,5 +1,5 @@ -#ifndef __SH_MOBILE_SDHI_H__ -#define __SH_MOBILE_SDHI_H__ +#ifndef LINUX_MMC_SH_MOBILE_SDHI_H +#define LINUX_MMC_SH_MOBILE_SDHI_H #include <linux/types.h> @@ -17,4 +17,4 @@ struct sh_mobile_sdhi_info { int (*get_cd)(struct platform_device *pdev); }; -#endif /* __SH_MOBILE_SDHI_H__ */ +#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h index 19490b942db..a1c1f321e51 100644 --- a/include/linux/mmc/tmio.h +++ b/include/linux/mmc/tmio.h @@ -12,8 +12,8 @@ * * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 */ -#ifndef _LINUX_MMC_TMIO_H_ -#define _LINUX_MMC_TMIO_H_ +#ifndef LINUX_MMC_TMIO_H +#define LINUX_MMC_TMIO_H #define CTL_SD_CMD 0x00 #define 
CTL_ARG_REG 0x04 @@ -21,6 +21,7 @@ #define CTL_XFER_BLK_COUNT 0xa #define CTL_RESPONSE 0x0c #define CTL_STATUS 0x1c +#define CTL_STATUS2 0x1e #define CTL_IRQ_MASK 0x20 #define CTL_SD_CARD_CLK_CTL 0x24 #define CTL_SD_XFER_LEN 0x26 @@ -30,6 +31,7 @@ #define CTL_TRANSACTION_CTL 0x34 #define CTL_SDIO_STATUS 0x36 #define CTL_SDIO_IRQ_MASK 0x38 +#define CTL_DMA_ENABLE 0xd8 #define CTL_RESET_SD 0xe0 #define CTL_SDIO_REGS 0x100 #define CTL_CLK_AND_WAIT_CTL 0x138 @@ -60,4 +62,4 @@ #define TMIO_BBS 512 /* Boot block size */ -#endif /* _LINUX_MMC_TMIO_H_ */ +#endif /* LINUX_MMC_TMIO_H */ diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 0b89efc6f21..29304855652 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -18,7 +18,6 @@ struct proc_mounts { struct seq_file m; /* must be the first element */ struct mnt_namespace *ns; struct path root; - int event; }; struct fs_struct; diff --git a/include/linux/module.h b/include/linux/module.h index d9ca2d5dc6d..1c30087a2d8 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -48,10 +48,18 @@ struct modversion_info struct module; +struct module_kobject { + struct kobject kobj; + struct module *mod; + struct kobject *drivers_dir; + struct module_param_attrs *mp; +}; + struct module_attribute { - struct attribute attr; - ssize_t (*show)(struct module_attribute *, struct module *, char *); - ssize_t (*store)(struct module_attribute *, struct module *, + struct attribute attr; + ssize_t (*show)(struct module_attribute *, struct module_kobject *, + char *); + ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t count); void (*setup)(struct module *, const char *); int (*test)(struct module *); @@ -65,15 +73,9 @@ struct module_version_attribute { } __attribute__ ((__aligned__(sizeof(void *)))); extern ssize_t __modver_version_show(struct module_attribute *, - struct module *, char *); + struct module_kobject *, char *); -struct module_kobject -{ - struct kobject kobj; - struct module *mod; - struct kobject *drivers_dir; - struct module_param_attrs *mp; -}; +extern struct module_attribute module_uevent; /* These are either module local, or the kernel's dummy ones. */ extern int init_module(void); diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index c1f40c2f7ff..b2be02ebf45 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -5,7 +5,12 @@ #include <linux/module.h> #include <linux/elf.h> -/* These must be implemented by the specific architecture */ +/* These may be implemented by architectures that need to hook into the + * module loader code. Architectures that don't need to do anything special + * can just rely on the 'weak' default hooks defined in kernel/module.c. + * Note, however, that at least one of apply_relocate or apply_relocate_add + * must be implemented by each architecture. + */ /* Adjust arch-specific sections. Return 0 on success. */ int module_frob_arch_sections(Elf_Ehdr *hdr, diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index 15da0e99f48..db4836bed51 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -155,12 +155,14 @@ struct ubi_device_info { }; /* - * enum - volume notification types. 
- * @UBI_VOLUME_ADDED: volume has been added - * @UBI_VOLUME_REMOVED: start volume volume - * @UBI_VOLUME_RESIZED: volume size has been re-sized - * @UBI_VOLUME_RENAMED: volume name has been re-named - * @UBI_VOLUME_UPDATED: volume name has been updated + * Volume notification types. + * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a + * volume was created) + * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached + * or a volume was removed) + * @UBI_VOLUME_RESIZED: a volume has been re-sized + * @UBI_VOLUME_RENAMED: a volume has been re-named + * @UBI_VOLUME_UPDATED: data has been written to a volume * * These constants define which type of event has happened when a volume * notification function is invoked. diff --git a/include/linux/mutex.h b/include/linux/mutex.h index a940fe435ac..7f87217e9d1 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -92,7 +92,7 @@ do { \ \ __mutex_init((mutex), #mutex, &__key); \ } while (0) -# define mutex_destroy(mutex) do { } while (0) +static inline void mutex_destroy(struct mutex *lock) {} #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/namei.h b/include/linux/namei.h index eba45ea1029..76fe2c62ae7 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -48,7 +48,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; */ #define LOOKUP_FOLLOW 0x0001 #define LOOKUP_DIRECTORY 0x0002 -#define LOOKUP_CONTINUE 0x0004 #define LOOKUP_PARENT 0x0010 #define LOOKUP_REVAL 0x0020 @@ -75,9 +74,11 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *); extern int kern_path(const char *, unsigned, struct path *); +extern struct dentry *kern_path_create(int, const char *, struct path *, int); +extern struct dentry *user_path_create(int, const char __user *, struct path *, int); extern int kern_path_parent(const char *, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, - const char *, unsigned int, struct nameidata *); + const char *, unsigned int, struct path *); extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 54b8b4d7b68..34f3abc6457 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -34,7 +34,6 @@ #include <linux/pm_qos_params.h> #include <linux/timer.h> #include <linux/delay.h> -#include <linux/mm.h> #include <asm/atomic.h> #include <asm/cache.h> #include <asm/byteorder.h> @@ -61,11 +60,6 @@ struct wireless_dev; #define SET_ETHTOOL_OPS(netdev,ops) \ ( (netdev)->ethtool_ops = (ops) ) -#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev - functions are available. */ -#define HAVE_FREE_NETDEV /* free_netdev() */ -#define HAVE_NETDEV_PRIV /* netdev_priv() */ - /* hardware address assignment types */ #define NET_ADDR_PERM 0 /* address is permanent (default) */ #define NET_ADDR_RANDOM 1 /* address is generated randomly */ @@ -258,21 +252,8 @@ struct netdev_hw_addr_list { netdev_hw_addr_list_for_each(ha, &(dev)->mc) struct hh_cache { - struct hh_cache *hh_next; /* Next entry */ - atomic_t hh_refcnt; /* number of users */ -/* - * We want hh_output, hh_len, hh_lock and hh_data be a in a separate - * cache line on SMP. - * They are mostly read, but hh_refcnt may be changed quite frequently, - * incurring cache line ping pongs. 
- */ - __be16 hh_type ____cacheline_aligned_in_smp; - /* protocol identifier, f.e ETH_P_IP - * NOTE: For VLANs, this will be the - * encapuslated type. --BLG - */ - u16 hh_len; /* length of header */ - int (*hh_output)(struct sk_buff *skb); + u16 hh_len; + u16 __pad; seqlock_t hh_lock; /* cached hardware header; allow for machine alignment needs. */ @@ -284,12 +265,6 @@ struct hh_cache { unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; }; -static inline void hh_cache_put(struct hh_cache *hh) -{ - if (atomic_dec_and_test(&hh->hh_refcnt)) - kfree(hh); -} - /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. * Alternative is: * dev->hard_header_len ? (dev->hard_header_len + @@ -314,8 +289,7 @@ struct header_ops { const void *saddr, unsigned len); int (*parse)(const struct sk_buff *skb, unsigned char *haddr); int (*rebuild)(struct sk_buff *skb); -#define HAVE_HEADER_CACHE - int (*cache)(const struct neighbour *neigh, struct hh_cache *hh); + int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, const unsigned char *haddr); @@ -556,7 +530,7 @@ struct netdev_queue { struct Qdisc *qdisc; unsigned long state; struct Qdisc *qdisc_sleeping; -#ifdef CONFIG_RPS +#if defined(CONFIG_RPS) || defined(CONFIG_XPS) struct kobject kobj; #endif #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) @@ -794,12 +768,6 @@ struct netdev_tc_txq { * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); - * If device support VLAN receive acceleration - * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called - * when vlan groups for the device changes. Note: grp is NULL - * if no vlan's groups are being used. - * * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); * If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER) * this function is called when a VLAN id is registered. @@ -888,7 +856,6 @@ struct netdev_tc_txq { * Must return >0 or -errno if it changed dev->features itself. * */ -#define HAVE_NET_DEVICE_OPS struct net_device_ops { int (*ndo_init)(struct net_device *dev); void (*ndo_uninit)(struct net_device *dev); @@ -919,8 +886,6 @@ struct net_device_ops { struct rtnl_link_stats64 *storage); struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); - void (*ndo_vlan_rx_register)(struct net_device *dev, - struct vlan_group *grp); void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, @@ -1097,12 +1062,6 @@ struct net_device { #define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ NETIF_F_FSO) -#define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ - NETIF_F_HIGHDMA | \ - NETIF_F_SCTP_CSUM | \ - NETIF_F_ALL_FCOE) - /* * If one device supports one of these features, then enable them * for all in netdev_increment_features. 
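With ndo_vlan_rx_register() and the vlan_group pointer it carried removed, a driver that does hardware VLAN filtering now tracks the registered ids itself from the remaining add/kill callbacks. A rough sketch under that assumption; the example_* names and the bitmap-in-netdev_priv layout are illustrative, not taken from this patch:

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>

struct example_priv {
	/* one bit per possible VLAN id (VLAN_N_VID == 4096) */
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
};

static void example_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct example_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->active_vlans);
	/* reprogram the hardware VLAN filter table from the bitmap */
}

static void example_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct example_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->active_vlans);
	/* reprogram the filter table again without this id */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_vlan_rx_add_vid	= example_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= example_vlan_rx_kill_vid,
};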
@@ -1220,7 +1179,7 @@ struct net_device { unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ -#ifdef CONFIG_RPS +#if defined(CONFIG_RPS) || defined(CONFIG_XPS) struct kset *queues_kset; struct netdev_rx_queue *_rx; @@ -1348,9 +1307,6 @@ struct net_device { /* max exchange id for FCoE LRO by ddp */ unsigned int fcoe_ddp_xid; #endif - /* n-tuple filter list attached to this device */ - struct ethtool_rx_ntuple_list ethtool_ntuple_list; - /* phy device may attach itself for hardware timestamping */ struct phy_device *phydev; @@ -1563,7 +1519,6 @@ struct packet_type { struct list_head list; }; -#include <linux/interrupt.h> #include <linux/notifier.h> extern rwlock_t dev_base_lock; /* Device list lock */ @@ -1786,8 +1741,6 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); -#define HAVE_NETIF_QUEUE - extern void __netif_schedule(struct Qdisc *q); static inline void netif_schedule_queue(struct netdev_queue *txq) @@ -2063,10 +2016,8 @@ extern void dev_kfree_skb_irq(struct sk_buff *skb); */ extern void dev_kfree_skb_any(struct sk_buff *skb); -#define HAVE_NETIF_RX 1 extern int netif_rx(struct sk_buff *skb); extern int netif_rx_ni(struct sk_buff *skb); -#define HAVE_NETIF_RECEIVE_SKB 1 extern int netif_receive_skb(struct sk_buff *skb); extern gro_result_t dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb); @@ -2246,7 +2197,6 @@ extern void netif_device_attach(struct net_device *dev); /* * Network interface message level settings */ -#define HAVE_NETIF_MSG 1 enum { NETIF_MSG_DRV = 0x0001, @@ -2564,7 +2514,6 @@ static inline u32 netdev_get_wanted_features(struct net_device *dev) return (dev->features & ~dev->hw_features) | dev->wanted_features; } u32 netdev_increment_features(u32 all, u32 one, u32 mask); -u32 netdev_fix_features(struct net_device *dev, u32 features); int __netdev_update_features(struct net_device *dev); void netdev_update_features(struct net_device *dev); void netdev_change_features(struct net_device *dev); diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 5a262e3ae71..3540c6e262f 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -104,6 +104,8 @@ enum { IPSET_ATTR_NAMEREF, IPSET_ATTR_IP2, IPSET_ATTR_CIDR2, + IPSET_ATTR_IP2_TO, + IPSET_ATTR_IFACE, __IPSET_ATTR_ADT_MAX, }; #define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1) @@ -142,12 +144,18 @@ enum ipset_errno { enum ipset_cmd_flags { IPSET_FLAG_BIT_EXIST = 0, IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST), + IPSET_FLAG_BIT_LIST_SETNAME = 1, + IPSET_FLAG_LIST_SETNAME = (1 << IPSET_FLAG_BIT_LIST_SETNAME), + IPSET_FLAG_BIT_LIST_HEADER = 2, + IPSET_FLAG_LIST_HEADER = (1 << IPSET_FLAG_BIT_LIST_HEADER), }; /* Flags at CADT attribute level */ enum ipset_cadt_flags { IPSET_FLAG_BIT_BEFORE = 0, IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE), + IPSET_FLAG_BIT_PHYSDEV = 1, + IPSET_FLAG_PHYSDEV = (1 << IPSET_FLAG_BIT_PHYSDEV), }; /* Commands with settype-specific attributes */ @@ -165,6 +173,7 @@ enum ipset_adt { #include <linux/ipv6.h> #include <linux/netlink.h> #include <linux/netfilter.h> +#include <linux/netfilter/x_tables.h> #include <linux/vmalloc.h> #include <net/netlink.h> @@ -206,6 +215,8 @@ enum ip_set_feature { IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG), IPSET_TYPE_NAME_FLAG = 4, IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG), + IPSET_TYPE_IFACE_FLAG = 5, + IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG), /* Strictly speaking not a 
feature, but a flag for dumping: * this settype must be dumped last */ IPSET_DUMP_LAST_FLAG = 7, @@ -214,7 +225,17 @@ enum ip_set_feature { struct ip_set; -typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout); +typedef int (*ipset_adtfn)(struct ip_set *set, void *value, + u32 timeout, u32 flags); + +/* Kernel API function options */ +struct ip_set_adt_opt { + u8 family; /* Actual protocol family */ + u8 dim; /* Dimension of match/target */ + u8 flags; /* Direction and negation flags */ + u32 cmdflags; /* Command-like flags */ + u32 timeout; /* Timeout value */ +}; /* Set type, variant-specific part */ struct ip_set_type_variant { @@ -223,14 +244,15 @@ struct ip_set_type_variant { * zero for no match/success to add/delete * positive for matching element */ int (*kadt)(struct ip_set *set, const struct sk_buff * skb, - enum ipset_adt adt, u8 pf, u8 dim, u8 flags); + const struct xt_action_param *par, + enum ipset_adt adt, const struct ip_set_adt_opt *opt); /* Userspace: test/add/del entries * returns negative error code, * zero for no match/success to add/delete * positive for matching element */ int (*uadt)(struct ip_set *set, struct nlattr *tb[], - enum ipset_adt adt, u32 *lineno, u32 flags); + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); /* Low level add/del/test functions */ ipset_adtfn adt[IPSET_ADT_MAX]; @@ -268,8 +290,8 @@ struct ip_set_type { u8 dimension; /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */ u8 family; - /* Type revision */ - u8 revision; + /* Type revisions */ + u8 revision_min, revision_max; /* Create set */ int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags); @@ -300,6 +322,8 @@ struct ip_set { const struct ip_set_type_variant *variant; /* The actual INET family of the set */ u8 family; + /* The type revision */ + u8 revision; /* The type specific data */ void *data; }; @@ -307,21 +331,25 @@ struct ip_set { /* register and unregister set references */ extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set); extern void ip_set_put_byindex(ip_set_id_t index); -extern const char * ip_set_name_byindex(ip_set_id_t index); +extern const char *ip_set_name_byindex(ip_set_id_t index); extern ip_set_id_t ip_set_nfnl_get(const char *name); extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index); extern void ip_set_nfnl_put(ip_set_id_t index); /* API for iptables set match, and SET target */ + extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb, - u8 family, u8 dim, u8 flags); + const struct xt_action_param *par, + const struct ip_set_adt_opt *opt); extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb, - u8 family, u8 dim, u8 flags); + const struct xt_action_param *par, + const struct ip_set_adt_opt *opt); extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb, - u8 family, u8 dim, u8 flags); + const struct xt_action_param *par, + const struct ip_set_adt_opt *opt); /* Utility functions */ -extern void * ip_set_alloc(size_t size); +extern void *ip_set_alloc(size_t size); extern void ip_set_free(void *members); extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); @@ -331,7 +359,7 @@ ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) { __be32 ip; int ret = ip_set_get_ipaddr4(nla, &ip); - + if (ret) return ret; *ipaddr = ntohl(ip); diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index ac3c822eb39..b89fb79cb44 
100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h @@ -5,6 +5,11 @@ #include <linux/jhash.h> #include <linux/netfilter/ipset/ip_set_timeout.h> +#define CONCAT(a, b, c) a##b##c +#define TOKEN(a, b, c) CONCAT(a, b, c) + +#define type_pf_next TOKEN(TYPE, PF, _elem) + /* Hashing which uses arrays to resolve clashing. The hash table is resized * (doubled) when searching becomes too long. * Internally jhash is used with the assumption that the size of the @@ -23,7 +28,32 @@ /* Number of elements to store in an initial array block */ #define AHASH_INIT_SIZE 4 /* Max number of elements to store in an array block */ -#define AHASH_MAX_SIZE (3*4) +#define AHASH_MAX_SIZE (3*AHASH_INIT_SIZE) + +/* Max number of elements can be tuned */ +#ifdef IP_SET_HASH_WITH_MULTI +#define AHASH_MAX(h) ((h)->ahash_max) + +static inline u8 +tune_ahash_max(u8 curr, u32 multi) +{ + u32 n; + + if (multi < curr) + return curr; + + n = curr + AHASH_INIT_SIZE; + /* Currently, at listing one hash bucket must fit into a message. + * Therefore we have a hard limit here. + */ + return n > curr && n <= 64 ? n : curr; +} +#define TUNE_AHASH_MAX(h, multi) \ + ((h)->ahash_max = tune_ahash_max((h)->ahash_max, multi)) +#else +#define AHASH_MAX(h) AHASH_MAX_SIZE +#define TUNE_AHASH_MAX(h, multi) +#endif /* A hash bucket */ struct hbucket { @@ -38,7 +68,7 @@ struct htable { struct hbucket bucket[0]; /* hashtable buckets */ }; -#define hbucket(h, i) &((h)->bucket[i]) +#define hbucket(h, i) (&((h)->bucket[i])) /* Book-keeping of the prefixes added to the set */ struct ip_set_hash_nets { @@ -54,9 +84,16 @@ struct ip_set_hash { u32 initval; /* random jhash init value */ u32 timeout; /* timeout value, if enabled */ struct timer_list gc; /* garbage collection when timeout enabled */ + struct type_pf_next next; /* temporary storage for uadd */ +#ifdef IP_SET_HASH_WITH_MULTI + u8 ahash_max; /* max elements in an array block */ +#endif #ifdef IP_SET_HASH_WITH_NETMASK u8 netmask; /* netmask value for subnets to store */ #endif +#ifdef IP_SET_HASH_WITH_RBTREE + struct rb_root rbtree; +#endif #ifdef IP_SET_HASH_WITH_NETS struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */ #endif @@ -194,17 +231,24 @@ ip_set_hash_destroy(struct ip_set *set) del_timer_sync(&h->gc); ahash_destroy(h->table); +#ifdef IP_SET_HASH_WITH_RBTREE + rbtree_destroy(&h->rbtree); +#endif kfree(h); set->data = NULL; } -#define HKEY(data, initval, htable_bits) \ -(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \ - & jhash_mask(htable_bits)) - #endif /* _IP_SET_AHASH_H */ +#ifndef HKEY_DATALEN +#define HKEY_DATALEN sizeof(struct type_pf_elem) +#endif + +#define HKEY(data, initval, htable_bits) \ +(jhash2((u32 *)(data), HKEY_DATALEN/sizeof(u32), initval) \ + & jhash_mask(htable_bits)) + #define CONCAT(a, b, c) a##b##c #define TOKEN(a, b, c) CONCAT(a, b, c) @@ -217,6 +261,7 @@ ip_set_hash_destroy(struct ip_set *set) #define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask) #define type_pf_data_list TOKEN(TYPE, PF, _data_list) #define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) +#define type_pf_data_next TOKEN(TYPE, PF, _data_next) #define type_pf_elem TOKEN(TYPE, PF, _elem) #define type_pf_telem TOKEN(TYPE, PF, _telem) @@ -262,12 +307,13 @@ ip_set_hash_destroy(struct ip_set *set) /* Add an element to the hash table when resizing the set: * we spare the maintenance of the internal counters. 
*/ static int -type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value) +type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value, + u8 ahash_max) { if (n->pos >= n->size) { void *tmp; - if (n->size >= AHASH_MAX_SIZE) + if (n->size >= ahash_max) /* Trigger rehashing */ return -EAGAIN; @@ -322,7 +368,7 @@ retry: for (j = 0; j < n->pos; j++) { data = ahash_data(n, j); m = hbucket(t, HKEY(data, h->initval, htable_bits)); - ret = type_pf_elem_add(m, data); + ret = type_pf_elem_add(m, data, AHASH_MAX(h)); if (ret < 0) { read_unlock_bh(&set->lock); ahash_destroy(t); @@ -346,17 +392,20 @@ retry: return 0; } +static inline void +type_pf_data_next(struct ip_set_hash *h, const struct type_pf_elem *d); + /* Add an element to a hash and update the internal counters when succeeded, * otherwise report the proper error code. */ static int -type_pf_add(struct ip_set *set, void *value, u32 timeout) +type_pf_add(struct ip_set *set, void *value, u32 timeout, u32 flags) { struct ip_set_hash *h = set->data; struct htable *t; const struct type_pf_elem *d = value; struct hbucket *n; int i, ret = 0; - u32 key; + u32 key, multi = 0; if (h->elements >= h->maxelem) return -IPSET_ERR_HASH_FULL; @@ -366,14 +415,17 @@ type_pf_add(struct ip_set *set, void *value, u32 timeout) key = HKEY(value, h->initval, t->htable_bits); n = hbucket(t, key); for (i = 0; i < n->pos; i++) - if (type_pf_data_equal(ahash_data(n, i), d)) { + if (type_pf_data_equal(ahash_data(n, i), d, &multi)) { ret = -IPSET_ERR_EXIST; goto out; } - - ret = type_pf_elem_add(n, value); - if (ret != 0) + TUNE_AHASH_MAX(h, multi); + ret = type_pf_elem_add(n, value, AHASH_MAX(h)); + if (ret != 0) { + if (ret == -EAGAIN) + type_pf_data_next(h, d); goto out; + } #ifdef IP_SET_HASH_WITH_NETS add_cidr(h, d->cidr, HOST_MASK); @@ -388,7 +440,7 @@ out: * and free up space if possible. 
*/ static int -type_pf_del(struct ip_set *set, void *value, u32 timeout) +type_pf_del(struct ip_set *set, void *value, u32 timeout, u32 flags) { struct ip_set_hash *h = set->data; struct htable *t = h->table; @@ -396,13 +448,13 @@ type_pf_del(struct ip_set *set, void *value, u32 timeout) struct hbucket *n; int i; struct type_pf_elem *data; - u32 key; + u32 key, multi = 0; key = HKEY(value, h->initval, t->htable_bits); n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_data(n, i); - if (!type_pf_data_equal(data, d)) + if (!type_pf_data_equal(data, d, &multi)) continue; if (i != n->pos - 1) /* Not last one */ @@ -443,17 +495,17 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) struct hbucket *n; const struct type_pf_elem *data; int i, j = 0; - u32 key; + u32 key, multi = 0; u8 host_mask = SET_HOST_MASK(set->family); pr_debug("test by nets\n"); - for (; j < host_mask && h->nets[j].cidr; j++) { + for (; j < host_mask && h->nets[j].cidr && !multi; j++) { type_pf_data_netmask(d, h->nets[j].cidr); key = HKEY(d, h->initval, t->htable_bits); n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_data(n, i); - if (type_pf_data_equal(data, d)) + if (type_pf_data_equal(data, d, &multi)) return 1; } } @@ -463,7 +515,7 @@ type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) /* Test whether the element is added to the set */ static int -type_pf_test(struct ip_set *set, void *value, u32 timeout) +type_pf_test(struct ip_set *set, void *value, u32 timeout, u32 flags) { struct ip_set_hash *h = set->data; struct htable *t = h->table; @@ -471,7 +523,7 @@ type_pf_test(struct ip_set *set, void *value, u32 timeout) struct hbucket *n; const struct type_pf_elem *data; int i; - u32 key; + u32 key, multi = 0; #ifdef IP_SET_HASH_WITH_NETS /* If we test an IP address and not a network address, @@ -484,7 +536,7 @@ type_pf_test(struct ip_set *set, void *value, u32 timeout) n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_data(n, i); - if (type_pf_data_equal(data, d)) + if (type_pf_data_equal(data, d, &multi)) return 1; } return 0; @@ -586,10 +638,11 @@ nla_put_failure: static int type_pf_kadt(struct ip_set *set, const struct sk_buff * skb, - enum ipset_adt adt, u8 pf, u8 dim, u8 flags); + const struct xt_action_param *par, + enum ipset_adt adt, const struct ip_set_adt_opt *opt); static int type_pf_uadt(struct ip_set *set, struct nlattr *tb[], - enum ipset_adt adt, u32 *lineno, u32 flags); + enum ipset_adt adt, u32 *lineno, u32 flags, bool retried); static const struct ip_set_type_variant type_pf_variant = { .kadt = type_pf_kadt, @@ -640,14 +693,14 @@ type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout) static int type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value, - u32 timeout) + u8 ahash_max, u32 timeout) { struct type_pf_elem *data; if (n->pos >= n->size) { void *tmp; - if (n->size >= AHASH_MAX_SIZE) + if (n->size >= ahash_max) /* Trigger rehashing */ return -EAGAIN; @@ -752,7 +805,7 @@ retry: for (j = 0; j < n->pos; j++) { data = ahash_tdata(n, j); m = hbucket(t, HKEY(data, h->initval, htable_bits)); - ret = type_pf_elem_tadd(m, data, + ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), type_pf_data_timeout(data)); if (ret < 0) { read_unlock_bh(&set->lock); @@ -776,15 +829,16 @@ retry: } static int -type_pf_tadd(struct ip_set *set, void *value, u32 timeout) +type_pf_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags) { struct ip_set_hash *h = set->data; struct htable *t = h->table; const 
struct type_pf_elem *d = value; struct hbucket *n; struct type_pf_elem *data; - int ret = 0, i, j = AHASH_MAX_SIZE + 1; - u32 key; + int ret = 0, i, j = AHASH_MAX(h) + 1; + bool flag_exist = flags & IPSET_FLAG_EXIST; + u32 key, multi = 0; if (h->elements >= h->maxelem) /* FIXME: when set is full, we slow down here */ @@ -798,18 +852,18 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout) n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_tdata(n, i); - if (type_pf_data_equal(data, d)) { - if (type_pf_data_expired(data)) + if (type_pf_data_equal(data, d, &multi)) { + if (type_pf_data_expired(data) || flag_exist) j = i; else { ret = -IPSET_ERR_EXIST; goto out; } - } else if (j == AHASH_MAX_SIZE + 1 && + } else if (j == AHASH_MAX(h) + 1 && type_pf_data_expired(data)) j = i; } - if (j != AHASH_MAX_SIZE + 1) { + if (j != AHASH_MAX(h) + 1) { data = ahash_tdata(n, j); #ifdef IP_SET_HASH_WITH_NETS del_cidr(h, data->cidr, HOST_MASK); @@ -819,9 +873,13 @@ type_pf_tadd(struct ip_set *set, void *value, u32 timeout) type_pf_data_timeout_set(data, timeout); goto out; } - ret = type_pf_elem_tadd(n, d, timeout); - if (ret != 0) + TUNE_AHASH_MAX(h, multi); + ret = type_pf_elem_tadd(n, d, AHASH_MAX(h), timeout); + if (ret != 0) { + if (ret == -EAGAIN) + type_pf_data_next(h, d); goto out; + } #ifdef IP_SET_HASH_WITH_NETS add_cidr(h, d->cidr, HOST_MASK); @@ -833,7 +891,7 @@ out: } static int -type_pf_tdel(struct ip_set *set, void *value, u32 timeout) +type_pf_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags) { struct ip_set_hash *h = set->data; struct htable *t = h->table; @@ -841,13 +899,13 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout) struct hbucket *n; int i; struct type_pf_elem *data; - u32 key; + u32 key, multi = 0; key = HKEY(value, h->initval, t->htable_bits); n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_tdata(n, i); - if (!type_pf_data_equal(data, d)) + if (!type_pf_data_equal(data, d, &multi)) continue; if (type_pf_data_expired(data)) return -IPSET_ERR_EXIST; @@ -887,16 +945,16 @@ type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) struct type_pf_elem *data; struct hbucket *n; int i, j = 0; - u32 key; + u32 key, multi = 0; u8 host_mask = SET_HOST_MASK(set->family); - for (; j < host_mask && h->nets[j].cidr; j++) { + for (; j < host_mask && h->nets[j].cidr && !multi; j++) { type_pf_data_netmask(d, h->nets[j].cidr); key = HKEY(d, h->initval, t->htable_bits); n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_tdata(n, i); - if (type_pf_data_equal(data, d)) + if (type_pf_data_equal(data, d, &multi)) return !type_pf_data_expired(data); } } @@ -905,14 +963,14 @@ type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout) #endif static int -type_pf_ttest(struct ip_set *set, void *value, u32 timeout) +type_pf_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags) { struct ip_set_hash *h = set->data; struct htable *t = h->table; struct type_pf_elem *data, *d = value; struct hbucket *n; int i; - u32 key; + u32 key, multi = 0; #ifdef IP_SET_HASH_WITH_NETS if (d->cidr == SET_HOST_MASK(set->family)) @@ -922,7 +980,7 @@ type_pf_ttest(struct ip_set *set, void *value, u32 timeout) n = hbucket(t, key); for (i = 0; i < n->pos; i++) { data = ahash_tdata(n, i); - if (type_pf_data_equal(data, d)) + if (type_pf_data_equal(data, d, &multi)) return !type_pf_data_expired(data); } return 0; @@ -1030,6 +1088,8 @@ type_pf_gc_init(struct ip_set *set) IPSET_GC_PERIOD(h->timeout)); } +#undef 
HKEY_DATALEN +#undef HKEY #undef type_pf_data_equal #undef type_pf_data_isnull #undef type_pf_data_copy diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h index b86f15c0452..e2a9fae767f 100644 --- a/include/linux/netfilter/ipset/ip_set_hash.h +++ b/include/linux/netfilter/ipset/ip_set_hash.h @@ -11,6 +11,10 @@ enum { IPSET_ERR_INVALID_PROTO, /* Protocol missing but must be specified */ IPSET_ERR_MISSING_PROTO, + /* Range not supported */ + IPSET_ERR_HASH_RANGE_UNSUPPORTED, + /* Invalid range */ + IPSET_ERR_HASH_RANGE, }; #ifdef __KERNEL__ diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index bcdd40ad39e..47923205a4a 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h @@ -22,6 +22,9 @@ #define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT) +#define opt_timeout(opt, map) \ + (with_timeout((opt)->timeout) ? (opt)->timeout : (map)->timeout) + static inline unsigned int ip_set_timeout_uget(struct nlattr *tb) { @@ -75,7 +78,7 @@ ip_set_timeout_set(u32 timeout) static inline u32 ip_set_timeout_get(unsigned long timeout) { - return timeout == IPSET_ELEM_PERMANENT ? 0 : + return timeout == IPSET_ELEM_PERMANENT ? 0 : jiffies_to_msecs(timeout - jiffies)/1000; } diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h index 0e1fb50da56..199fd11fedc 100644 --- a/include/linux/netfilter/ipset/pfxlen.h +++ b/include/linux/netfilter/ipset/pfxlen.h @@ -2,7 +2,8 @@ #define _PFXLEN_H #include <asm/byteorder.h> -#include <linux/netfilter.h> +#include <linux/netfilter.h> +#include <net/tcp.h> /* Prefixlen maps, by Jan Engelhardt */ extern const union nf_inet_addr ip_set_netmask_map[]; @@ -32,4 +33,12 @@ ip_set_hostmask6(u8 pfxlen) return &ip_set_hostmask_map[pfxlen].ip6[0]; } +extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr); + +#define ip_set_mask_from_to(from, to, cidr) \ +do { \ + from &= ip_set_hostmask(cidr); \ + to = from | ~ip_set_hostmask(cidr); \ +} while (0) + #endif /*_PFXLEN_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 2b11fc1a86b..74d33861473 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -60,6 +60,9 @@ struct nfnl_callback { int (*call)(struct sock *nl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[]); + int (*call_rcu)(struct sock *nl, struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); const struct nla_policy *policy; /* netlink attribute policy */ const u_int16_t attr_count; /* number of nlattr's */ }; diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h index af94e0014eb..24b32e6c009 100644 --- a/include/linux/netfilter/nfnetlink_queue.h +++ b/include/linux/netfilter/nfnetlink_queue.h @@ -8,6 +8,7 @@ enum nfqnl_msg_types { NFQNL_MSG_PACKET, /* packet from kernel to userspace */ NFQNL_MSG_VERDICT, /* verdict from userspace to kernel */ NFQNL_MSG_CONFIG, /* connect to a particular queue */ + NFQNL_MSG_VERDICT_BATCH, /* batchv from userspace to kernel */ NFQNL_MSG_MAX }; diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h index 081f1ded284..c0405ac9287 100644 --- a/include/linux/netfilter/xt_set.h +++ b/include/linux/netfilter/xt_set.h @@ -35,7 +35,7 @@ struct xt_set_info_target_v0 { struct xt_set_info_v0 del_set; }; -/* Revision 
1: current interface to netfilter/iptables */ +/* Revision 1 match and target */ struct xt_set_info { ip_set_id_t index; @@ -44,13 +44,22 @@ struct xt_set_info { }; /* match and target infos */ -struct xt_set_info_match { +struct xt_set_info_match_v1 { struct xt_set_info match_set; }; -struct xt_set_info_target { +struct xt_set_info_target_v1 { struct xt_set_info add_set; struct xt_set_info del_set; }; +/* Revision 2 target */ + +struct xt_set_info_target_v2 { + struct xt_set_info add_set; + struct xt_set_info del_set; + u32 flags; + u32 timeout; +}; + #endif /*_XT_SET_H*/ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index a9dd89552f9..2e17c5dbdcb 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -49,6 +49,7 @@ struct nlmsghdr { #define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */ #define NLM_F_ACK 4 /* Reply with ack, with zero or error code */ #define NLM_F_ECHO 8 /* Echo this request */ +#define NLM_F_DUMP_INTR 16 /* Dump was inconsistent due to sequence change */ /* Modifiers to GET request */ #define NLM_F_ROOT 0x100 /* specify tree root */ @@ -221,7 +222,9 @@ struct netlink_callback { int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); int (*done)(struct netlink_callback *cb); - int family; + u16 family; + u16 min_dump_alloc; + unsigned int prev_seq, seq; long args[6]; }; @@ -259,7 +262,8 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, int (*dump)(struct sk_buff *skb, struct netlink_callback*), - int (*done)(struct netlink_callback*)); + int (*done)(struct netlink_callback*), + u16 min_dump_alloc); #define NL_NONROOT_RECV 0x1 diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 79358bb712c..5dfa091c334 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -40,8 +40,6 @@ struct netpoll_info { struct netpoll *netpoll; }; -void netpoll_poll_dev(struct net_device *dev); -void netpoll_poll(struct netpoll *np); void netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); diff --git a/include/linux/nfc.h b/include/linux/nfc.h new file mode 100644 index 00000000000..330a4c5db58 --- /dev/null +++ b/include/linux/nfc.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * + * Authors: + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __LINUX_NFC_H +#define __LINUX_NFC_H + +#include <linux/types.h> +#include <linux/socket.h> + +#define NFC_GENL_NAME "nfc" +#define NFC_GENL_VERSION 1 + +#define NFC_GENL_MCAST_EVENT_NAME "events" + +/** + * enum nfc_commands - supported nfc commands + * + * @NFC_CMD_UNSPEC: unspecified command + * + * @NFC_CMD_GET_DEVICE: request information about a device (requires + * %NFC_ATTR_DEVICE_INDEX) or dump request to get a list of all nfc devices + * @NFC_CMD_START_POLL: start polling for targets using the given protocols + * (requires %NFC_ATTR_DEVICE_INDEX and %NFC_ATTR_PROTOCOLS) + * @NFC_CMD_STOP_POLL: stop polling for targets (requires + * %NFC_ATTR_DEVICE_INDEX) + * @NFC_CMD_GET_TARGET: dump all targets found by the previous poll (requires + * %NFC_ATTR_DEVICE_INDEX) + * @NFC_EVENT_TARGETS_FOUND: event emitted when a new target is found + * (it sends %NFC_ATTR_DEVICE_INDEX) + * @NFC_EVENT_DEVICE_ADDED: event emitted when a new device is registred + * (it sends %NFC_ATTR_DEVICE_NAME, %NFC_ATTR_DEVICE_INDEX and + * %NFC_ATTR_PROTOCOLS) + * @NFC_EVENT_DEVICE_REMOVED: event emitted when a device is removed + * (it sends %NFC_ATTR_DEVICE_INDEX) + */ +enum nfc_commands { + NFC_CMD_UNSPEC, + NFC_CMD_GET_DEVICE, + NFC_CMD_START_POLL, + NFC_CMD_STOP_POLL, + NFC_CMD_GET_TARGET, + NFC_EVENT_TARGETS_FOUND, + NFC_EVENT_DEVICE_ADDED, + NFC_EVENT_DEVICE_REMOVED, +/* private: internal use only */ + __NFC_CMD_AFTER_LAST +}; +#define NFC_CMD_MAX (__NFC_CMD_AFTER_LAST - 1) + +/** + * enum nfc_attrs - supported nfc attributes + * + * @NFC_ATTR_UNSPEC: unspecified attribute + * + * @NFC_ATTR_DEVICE_INDEX: index of nfc device + * @NFC_ATTR_DEVICE_NAME: device name, max 8 chars + * @NFC_ATTR_PROTOCOLS: nfc protocols - bitwise or-ed combination from + * NFC_PROTO_*_MASK constants + * @NFC_ATTR_TARGET_INDEX: index of the nfc target + * @NFC_ATTR_TARGET_SENS_RES: NFC-A targets extra information such as NFCID + * @NFC_ATTR_TARGET_SEL_RES: NFC-A targets extra information (useful if the + * target is not NFC-Forum compliant) + */ +enum nfc_attrs { + NFC_ATTR_UNSPEC, + NFC_ATTR_DEVICE_INDEX, + NFC_ATTR_DEVICE_NAME, + NFC_ATTR_PROTOCOLS, + NFC_ATTR_TARGET_INDEX, + NFC_ATTR_TARGET_SENS_RES, + NFC_ATTR_TARGET_SEL_RES, +/* private: internal use only */ + __NFC_ATTR_AFTER_LAST +}; +#define NFC_ATTR_MAX (__NFC_ATTR_AFTER_LAST - 1) + +#define NFC_DEVICE_NAME_MAXSIZE 8 + +/* NFC protocols */ +#define NFC_PROTO_JEWEL 1 +#define NFC_PROTO_MIFARE 2 +#define NFC_PROTO_FELICA 3 +#define NFC_PROTO_ISO14443 4 +#define NFC_PROTO_NFC_DEP 5 + +#define NFC_PROTO_MAX 6 + +/* NFC protocols masks used in bitsets */ +#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL) +#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE) +#define NFC_PROTO_FELICA_MASK (1 << NFC_PROTO_FELICA) +#define NFC_PROTO_ISO14443_MASK (1 << NFC_PROTO_ISO14443) +#define NFC_PROTO_NFC_DEP_MASK (1 << NFC_PROTO_NFC_DEP) + +struct sockaddr_nfc { + sa_family_t sa_family; + __u32 dev_idx; + __u32 target_idx; + __u32 nfc_protocol; +}; + +/* NFC socket protocols */ +#define NFC_SOCKPROTO_RAW 0 +#define NFC_SOCKPROTO_MAX 1 + +#endif /*__LINUX_NFC_H */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 1b93b9c60e5..8b579beb635 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -85,7 +85,7 @@ struct nfs_lock_context { struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; - struct path path; + struct dentry *dentry; struct rpc_cred *cred; struct nfs4_state *state; fmode_t mode; @@ -360,7 +360,7 @@ 
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); -extern int nfs_permission(struct inode *, int, unsigned int); +extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_release(struct inode *, struct file *); extern int nfs_attribute_timeout(struct inode *inode); @@ -372,7 +372,7 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); -extern struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index c7ccaae15af..e4da76c9e4d 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -247,7 +247,8 @@ * passed, all channels allowed for the current regulatory domain * are used. Extra IEs can also be passed from the userspace by * using the %NL80211_ATTR_IE attribute. - * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan + * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT + * if scheduled scan is not running. * @NL80211_CMD_SCHED_SCAN_RESULTS: indicates that there are scheduled scan * results available. * @NL80211_CMD_SCHED_SCAN_STOPPED: indicates that the scheduled scan has @@ -483,6 +484,14 @@ * more background information, see * http://wireless.kernel.org/en/users/Documentation/WoWLAN. * + * @NL80211_CMD_SET_REKEY_OFFLOAD: This command is used give the driver + * the necessary information for supporting GTK rekey offload. This + * feature is typically used during WoWLAN. The configuration data + * is contained in %NL80211_ATTR_REKEY_DATA (which is nested and + * contains the data in sub-attributes). After rekeying happened, + * this command may also be sent by the driver as an MLME event to + * inform userspace of the new replay counter. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -605,6 +614,8 @@ enum nl80211_commands { NL80211_CMD_SCHED_SCAN_RESULTS, NL80211_CMD_SCHED_SCAN_STOPPED, + NL80211_CMD_SET_REKEY_OFFLOAD, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -745,8 +756,12 @@ enum nl80211_commands { * * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with * a single scan request, a wiphy attribute. + * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS: number of SSIDs you can + * scan with a single scheduled scan request, a wiphy attribute. 
* @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements * that can be added to a scan request + * @NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN: maximum length of information + * elements that can be added to a scheduled scan request * * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz) * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive @@ -978,8 +993,8 @@ enum nl80211_commands { * driving the peer link management state machine. * @NL80211_MESH_SETUP_USERSPACE_AMPE must be enabled. * - * @NL80211_ATTR_WOWLAN_SUPPORTED: indicates, as part of the wiphy capabilities, - * the supported WoWLAN triggers + * @NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED: indicates, as part of the wiphy + * capabilities, the supported WoWLAN triggers * @NL80211_ATTR_WOWLAN_TRIGGERS: used by %NL80211_CMD_SET_WOWLAN to * indicate which WoW triggers should be enabled. This is also * used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN @@ -996,6 +1011,14 @@ enum nl80211_commands { * are managed in software: interfaces of these types aren't subject to * any restrictions in their number or combinations. * + * @%NL80211_ATTR_REKEY_DATA: nested attribute containing the information + * necessary for GTK rekeying in the device, see &enum nl80211_rekey_data. + * + * @NL80211_ATTR_SCAN_SUPP_RATES: rates per to be advertised as supported in scan, + * nested array attribute containing an entry for each band, with the entry + * being a list of supported rates as defined by IEEE 802.11 7.3.2.2 but + * without the length restriction (at most %NL80211_MAX_SUPP_RATES). + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -1194,6 +1217,13 @@ enum nl80211_attrs { NL80211_ATTR_INTERFACE_COMBINATIONS, NL80211_ATTR_SOFTWARE_IFTYPES, + NL80211_ATTR_REKEY_DATA, + + NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, + NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, + + NL80211_ATTR_SCAN_SUPP_RATES, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -2239,6 +2269,16 @@ struct nl80211_wowlan_pattern_support { * * In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute * carrying a &struct nl80211_wowlan_pattern_support. 
+ * @NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED: Not a real trigger, and cannot be + * used when setting, used only to indicate that GTK rekeying is supported + * by the device (flag) + * @NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE: wake up on GTK rekey failure (if + * done by the device) (flag) + * @NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST: wake up on EAP Identity Request + * packet (flag) + * @NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE: wake up on 4-way handshake (flag) + * @NL80211_WOWLAN_TRIG_RFKILL_RELEASE: wake up when rfkill is released + * (on devices that have rfkill in the device) (flag) * @NUM_NL80211_WOWLAN_TRIG: number of wake on wireless triggers * @MAX_NL80211_WOWLAN_TRIG: highest wowlan trigger attribute number */ @@ -2248,6 +2288,11 @@ enum nl80211_wowlan_triggers { NL80211_WOWLAN_TRIG_DISCONNECT, NL80211_WOWLAN_TRIG_MAGIC_PKT, NL80211_WOWLAN_TRIG_PKT_PATTERN, + NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED, + NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE, + NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST, + NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE, + NL80211_WOWLAN_TRIG_RFKILL_RELEASE, /* keep last */ NUM_NL80211_WOWLAN_TRIG, @@ -2361,4 +2406,28 @@ enum nl80211_plink_state { MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1 }; +#define NL80211_KCK_LEN 16 +#define NL80211_KEK_LEN 16 +#define NL80211_REPLAY_CTR_LEN 8 + +/** + * enum nl80211_rekey_data - attributes for GTK rekey offload + * @__NL80211_REKEY_DATA_INVALID: invalid number for nested attributes + * @NL80211_REKEY_DATA_KEK: key encryption key (binary) + * @NL80211_REKEY_DATA_KCK: key confirmation key (binary) + * @NL80211_REKEY_DATA_REPLAY_CTR: replay counter (binary) + * @NUM_NL80211_REKEY_DATA: number of rekey attributes (internal) + * @MAX_NL80211_REKEY_DATA: highest rekey attribute (internal) + */ +enum nl80211_rekey_data { + __NL80211_REKEY_DATA_INVALID, + NL80211_REKEY_DATA_KEK, + NL80211_REKEY_DATA_KCK, + NL80211_REKEY_DATA_REPLAY_CTR, + + /* keep last */ + NUM_NL80211_REKEY_DATA, + MAX_NL80211_REKEY_DATA = NUM_NL80211_REKEY_DATA - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 50d20aba57d..cc37a55ad00 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -68,6 +68,7 @@ void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); void free_nsproxy(struct nsproxy *ns); int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, struct fs_struct *); +int __init nsproxy_cache_init(void); static inline void put_nsproxy(struct nsproxy *ns) { diff --git a/include/linux/of.h b/include/linux/of.h index bfc0ed1b0ce..bd716f8908d 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -195,6 +195,13 @@ extern struct device_node *of_find_node_with_property( extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); +extern int of_property_read_u32_array(const struct device_node *np, + char *propname, + u32 *out_values, + size_t sz); + +extern int of_property_read_string(struct device_node *np, char *propname, + const char **out_string); extern int of_device_is_compatible(const struct device_node *device, const char *); extern int of_device_is_available(const struct device_node *device); @@ -227,12 +234,32 @@ extern void of_attach_node(struct device_node *); extern void of_detach_node(struct device_node *); #endif -#else +#else /* CONFIG_OF */ static inline bool of_have_populated_dt(void) { return false; } +static inline int of_property_read_u32_array(const struct device_node *np, + char *propname, u32 *out_values, size_t 
sz) +{ + return -ENOSYS; +} + +static inline int of_property_read_string(struct device_node *np, + char *propname, const char **out_string) +{ + return -ENOSYS; +} + #endif /* CONFIG_OF */ + +static inline int of_property_read_u32(const struct device_node *np, + char *propname, + u32 *out_value) +{ + return of_property_read_u32_array(np, propname, out_value, 1); +} + #endif /* _LINUX_OF_H */ diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 2feda6ee614..3118623c2c1 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -1,11 +1,16 @@ #ifndef __OF_ADDRESS_H #define __OF_ADDRESS_H #include <linux/ioport.h> +#include <linux/errno.h> #include <linux/of.h> extern u64 of_translate_address(struct device_node *np, const __be32 *addr); extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); +extern struct device_node *of_find_matching_node_by_address( + struct device_node *from, + const struct of_device_id *matches, + u64 base_address); extern void __iomem *of_iomap(struct device_node *device, int index); /* Extract an address from a device, returns the region size and diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 6598c04dab0..aec8025c786 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -46,8 +46,9 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(gc, struct of_mm_gpio_chip, gc); } -extern int of_get_gpio_flags(struct device_node *np, int index, - enum of_gpio_flags *flags); +extern int of_get_named_gpio_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags); + extern unsigned int of_gpio_count(struct device_node *np); extern int of_mm_gpiochip_add(struct device_node *np, @@ -60,8 +61,8 @@ extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np); #else /* CONFIG_OF_GPIO */ /* Drivers may not strictly depend on the GPIO support, so let them link. */ -static inline int of_get_gpio_flags(struct device_node *np, int index, - enum of_gpio_flags *flags) +static inline int of_get_named_gpio_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags) { return -ENOSYS; } @@ -77,7 +78,38 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { } #endif /* CONFIG_OF_GPIO */ /** - * of_get_gpio - Get a GPIO number to use with GPIO API + * of_get_gpio_flags() - Get a GPIO number and flags to use with GPIO API + * @np: device node to get GPIO from + * @index: index of the GPIO + * @flags: a flags pointer to fill in + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. If @flags is not NULL the function also fills + * in flags for the GPIO. + */ +static inline int of_get_gpio_flags(struct device_node *np, int index, + enum of_gpio_flags *flags) +{ + return of_get_named_gpio_flags(np, "gpios", index, flags); +} + +/** + * of_get_named_gpio() - Get a GPIO number to use with GPIO API + * @np: device node to get GPIO from + * @propname: Name of property containing gpio specifier(s) + * @index: index of the GPIO + * + * Returns GPIO number to use with Linux generic GPIO API, or one of the errno + * value on the error condition. 
+ */ +static inline int of_get_named_gpio(struct device_node *np, + const char *propname, int index) +{ + return of_get_named_gpio_flags(np, propname, index, NULL); +} + +/** + * of_get_gpio() - Get a GPIO number to use with GPIO API * @np: device node to get GPIO from * @index: index of the GPIO * diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index 85a27b650d7..f93e21700d3 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h @@ -6,4 +6,9 @@ struct pci_dev; struct of_irq; int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); + +struct device_node; +struct device_node *of_pci_find_child_device(struct device_node *parent, + unsigned int devfn); + #endif diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index fb51ae38cea..5a6f458a4bb 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -20,6 +20,40 @@ #include <linux/platform_device.h> /** + * struct of_dev_auxdata - lookup table entry for device names & platform_data + * @compatible: compatible value of node to match against node + * @phys_addr: Start address of registers to match against node + * @name: Name to assign for matching nodes + * @platform_data: platform_data to assign for matching nodes + * + * This lookup table allows the caller of of_platform_populate() to override + * the names of devices when creating devices from the device tree. The table + * should be terminated with an empty entry. It also allows the platform_data + * pointer to be set. + * + * The reason for this functionality is that some Linux infrastructure uses + * the device name to look up a specific device, but the Linux-specific names + * are not encoded into the device tree, so the kernel needs to provide specific + * values. + * + * Note: Using an auxdata lookup table should be considered a last resort when + * converting a platform to use the DT. Normally the automatically generated + * device name will not matter, and drivers should obtain data from the device + * node instead of from an anonymouns platform_data pointer. + */ +struct of_dev_auxdata { + char *compatible; + resource_size_t phys_addr; + char *name; + void *platform_data; +}; + +/* Macro to simplify populating a lookup table */ +#define OF_DEV_AUXDATA(_compat,_phys,_name,_pdata) \ + { .compatible = _compat, .phys_addr = _phys, .name = _name, \ + .platform_data = _pdata } + +/** * of_platform_driver - Legacy of-aware driver for platform devices. 
* * An of_platform_driver driver is attached to a basic platform_device on @@ -40,6 +74,8 @@ struct of_platform_driver #define to_of_platform_driver(drv) \ container_of(drv,struct of_platform_driver, driver) +extern const struct of_device_id of_default_bus_match_table[]; + /* Platform drivers register/unregister */ extern struct platform_device *of_device_alloc(struct device_node *np, const char *bus_id, @@ -55,6 +91,10 @@ extern struct platform_device *of_platform_device_create(struct device_node *np, extern int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent); +extern int of_platform_populate(struct device_node *root, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent); #endif /* !CONFIG_SPARC */ #endif /* CONFIG_OF_DEVICE */ diff --git a/include/linux/opp.h b/include/linux/opp.h index 5449945d589..7020e9736fc 100644 --- a/include/linux/opp.h +++ b/include/linux/opp.h @@ -94,12 +94,20 @@ static inline int opp_disable(struct device *dev, unsigned long freq) #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) int opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table); +void opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table); #else static inline int opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table) { return -EINVAL; } + +static inline +void opp_free_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ +} #endif /* CONFIG_CPU_FREQ */ #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 716875e5352..8e38d4c140f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -134,7 +134,7 @@ static inline int page_cache_get_speculative(struct page *page) VM_BUG_ON(in_interrupt()); #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) -# ifdef CONFIG_PREEMPT +# ifdef CONFIG_PREEMPT_COUNT VM_BUG_ON(!in_atomic()); # endif /* @@ -172,7 +172,7 @@ static inline int page_cache_add_speculative(struct page *page, int count) VM_BUG_ON(in_interrupt()); #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) -# ifdef CONFIG_PREEMPT +# ifdef CONFIG_PREEMPT_COUNT VM_BUG_ON(!in_atomic()); # endif VM_BUG_ON(page_count(page) == 0); diff --git a/include/linux/pci.h b/include/linux/pci.h index c446b5ca2d3..4e4203a9631 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1589,5 +1589,44 @@ int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, unsigned int len, const char *kw); +/* PCI <-> OF binding helpers */ +#ifdef CONFIG_OF +struct device_node; +extern void pci_set_of_node(struct pci_dev *dev); +extern void pci_release_of_node(struct pci_dev *dev); +extern void pci_set_bus_of_node(struct pci_bus *bus); +extern void pci_release_bus_of_node(struct pci_bus *bus); + +/* Arch may override this (weak) */ +extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus); + +static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) +{ + return pdev ? pdev->dev.of_node : NULL; +} + +static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) +{ + return bus ? 
bus->dev.of_node : NULL; +} + +#else /* CONFIG_OF */ +static inline void pci_set_of_node(struct pci_dev *dev) { } +static inline void pci_release_of_node(struct pci_dev *dev) { } +static inline void pci_set_bus_of_node(struct pci_bus *bus) { } +static inline void pci_release_bus_of_node(struct pci_bus *bus) { } +#endif /* CONFIG_OF */ + +/** + * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device + * @pdev: the PCI device + * + * if the device is PCIE, return NULL + * if the device isn't connected to a PCIe bridge (that is its parent is a + * legacy PCI bridge and the bridge is directly connected to bus 0), return its + * parent + */ +struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); + #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f8910e15556..74173c585af 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1308,6 +1308,7 @@ #define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 #define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 #define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 +#define PCI_SUBDEVICE_ID_CREATIVE_SB1270 0x0062 #define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 #define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ @@ -2103,6 +2104,9 @@ #define PCI_DEVICE_ID_TIGON3_5761E 0x1680 #define PCI_DEVICE_ID_TIGON3_5761 0x1681 #define PCI_DEVICE_ID_TIGON3_5764 0x1684 +#define PCI_DEVICE_ID_NX2_57800 0x168a +#define PCI_DEVICE_ID_NX2_57840 0x168d +#define PCI_DEVICE_ID_NX2_57810 0x168e #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5784 0x1698 @@ -2110,11 +2114,17 @@ #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c #define PCI_DEVICE_ID_TIGON3_5789 0x169d +#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5 #define PCI_DEVICE_ID_TIGON3_5702X 0x16a6 #define PCI_DEVICE_ID_TIGON3_5703X 0x16a7 #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 +#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa +#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab #define PCI_DEVICE_ID_NX2_5708S 0x16ac +#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad +#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae +#define PCI_DEVICE_ID_NX2_57810_VF 0x16af #define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6 #define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7 #define PCI_DEVICE_ID_TIGON3_5781 0x16dd diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e0786e35f24..3f2711ccf91 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -61,7 +61,7 @@ enum perf_hw_id { /* * Generalized hardware cache events: * - * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x * { read, write, prefetch } x * { accesses, misses } */ @@ -72,6 +72,7 @@ enum perf_hw_cache_id { PERF_COUNT_HW_CACHE_DTLB = 3, PERF_COUNT_HW_CACHE_ITLB = 4, PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ }; @@ -536,6 +537,16 @@ struct perf_branch_stack { struct task_struct; +/* + * extra PMU register associated with an event + */ +struct hw_perf_event_extra { + u64 config; /* register value */ + unsigned int reg; /* register address or index */ + int alloc; /* extra register already allocated */ + int idx; /* index in shared_regs->regs[] */ +}; + /** * struct hw_perf_event - performance event hardware details: */ @@ -549,9 +560,7 @@ struct hw_perf_event { unsigned long event_base; int idx; int last_cpu; - unsigned int extra_reg; - u64 
extra_config; - int extra_alloc; + struct hw_perf_event_extra extra_reg; }; struct { /* software */ struct hrtimer hrtimer; @@ -680,36 +689,9 @@ enum perf_event_active_state { }; struct file; - -#define PERF_BUFFER_WRITABLE 0x01 - -struct perf_buffer { - atomic_t refcount; - struct rcu_head rcu_head; -#ifdef CONFIG_PERF_USE_VMALLOC - struct work_struct work; - int page_order; /* allocation order */ -#endif - int nr_pages; /* nr of data pages */ - int writable; /* are we writable */ - - atomic_t poll; /* POLL_ for wakeups */ - - local_t head; /* write position */ - local_t nest; /* nested writers */ - local_t events; /* event limit */ - local_t wakeup; /* wakeup stamp */ - local_t lost; /* nr records lost */ - - long watermark; /* wakeup watermark */ - - struct perf_event_mmap_page *user_page; - void *data_pages[0]; -}; - struct perf_sample_data; -typedef void (*perf_overflow_handler_t)(struct perf_event *, int, +typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *regs); @@ -745,6 +727,8 @@ struct perf_cgroup { }; #endif +struct ring_buffer; + /** * struct perf_event - performance event kernel representation: */ @@ -834,7 +818,7 @@ struct perf_event { atomic_t mmap_count; int mmap_locked; struct user_struct *mmap_user; - struct perf_buffer *buffer; + struct ring_buffer *rb; /* poll related */ wait_queue_head_t waitq; @@ -855,6 +839,7 @@ struct perf_event { u64 id; perf_overflow_handler_t overflow_handler; + void *overflow_handler_context; #ifdef CONFIG_EVENT_TRACING struct ftrace_event_call *tp_event; @@ -919,8 +904,8 @@ struct perf_event_context { u64 parent_gen; u64 generation; int pin_count; - struct rcu_head rcu_head; int nr_cgroups; /* cgroup events present */ + struct rcu_head rcu_head; }; /* @@ -945,13 +930,11 @@ struct perf_cpu_context { struct perf_output_handle { struct perf_event *event; - struct perf_buffer *buffer; + struct ring_buffer *rb; unsigned long wakeup; unsigned long size; void *addr; int page; - int nmi; - int sample; }; #ifdef CONFIG_PERF_EVENTS @@ -972,13 +955,15 @@ extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); +extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, struct task_struct *task, - perf_overflow_handler_t callback); + perf_overflow_handler_t callback, + void *context); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -1018,7 +1003,7 @@ extern void perf_prepare_sample(struct perf_event_header *header, struct perf_event *event, struct pt_regs *regs); -extern int perf_event_overflow(struct perf_event *event, int nmi, +extern int perf_event_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); @@ -1037,7 +1022,7 @@ static inline int is_software_event(struct perf_event *event) extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; -extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); +extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } @@ -1059,7 +1044,7 @@ static inline void 
perf_fetch_caller_regs(struct pt_regs *regs) } static __always_inline void -perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { struct pt_regs hot_regs; @@ -1068,7 +1053,7 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) perf_fetch_caller_regs(&hot_regs); regs = &hot_regs; } - __perf_sw_event(event_id, nr, nmi, regs, addr); + __perf_sw_event(event_id, nr, regs, addr); } } @@ -1082,7 +1067,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task) static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) { - perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); + perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); __perf_event_task_sched_out(task, next); } @@ -1143,8 +1128,7 @@ extern void perf_bp_event(struct perf_event *event, void *data); #endif extern int perf_output_begin(struct perf_output_handle *handle, - struct perf_event *event, unsigned int size, - int nmi, int sample); + struct perf_event *event, unsigned int size); extern void perf_output_end(struct perf_output_handle *handle); extern void perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); @@ -1166,10 +1150,13 @@ static inline void perf_event_delayed_put(struct task_struct *task) { } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } +static inline int perf_event_refresh(struct perf_event *event, int refresh) +{ + return -EINVAL; +} static inline void -perf_sw_event(u32 event_id, u64 nr, int nmi, - struct pt_regs *regs, u64 addr) { } +perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void perf_bp_event(struct perf_event *event, void *data) { } diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h new file mode 100644 index 00000000000..51ad0995aba --- /dev/null +++ b/include/linux/platform_data/pxa_sdhci.h @@ -0,0 +1,60 @@ +/* + * include/linux/platform_data/pxa_sdhci.h + * + * Copyright 2010 Marvell + * Zhangfei Gao <zhangfei.gao@marvell.com> + * + * PXA Platform - SDHCI platform data definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _PXA_SDHCI_H_ +#define _PXA_SDHCI_H_ + +/* pxa specific flag */ +/* Require clock free running */ +#define PXA_FLAG_ENABLE_CLOCK_GATING (1<<0) +/* card always wired to host, like on-chip emmc */ +#define PXA_FLAG_CARD_PERMANENT (1<<1) +/* Board design supports 8-bit data on SD/SDIO BUS */ +#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) + +/* + * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI + * @flags: flags for platform requirement + * @clk_delay_cycles: + * mmp2: each step is roughly 100ps, 5bits width + * pxa910: each step is 1ns, 4bits width + * @clk_delay_sel: select clk_delay, used on pxa910 + * 0: choose feedback clk + * 1: choose feedback clk + delay value + * 2: choose internal clk + * @clk_delay_enable: enable clk_delay or not, used on pxa910 + * @ext_cd_gpio: gpio pin used for external CD line + * @ext_cd_gpio_invert: invert values for external CD gpio line + * @max_speed: the maximum speed supported + * @host_caps: Standard MMC host capabilities bit field. 
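A board-file sketch for the PXA SDHCI platform data defined just below in this header; the flag combination and the clock-delay value are illustrative only:

#include <linux/platform_data/pxa_sdhci.h>

/* hypothetical on-board eMMC slot: always present, wired for 8-bit data */
static struct sdhci_pxa_platdata board_emmc_pdata = {
	.flags		  = PXA_FLAG_CARD_PERMANENT |
			    PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
	.clk_delay_cycles = 0x1f,	/* made-up tuning value */
};

The structure would then be handed to the SDHCI platform device as its platform_data.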
+ * @quirks: quirks of platfrom + * @pm_caps: pm_caps of platfrom + */ +struct sdhci_pxa_platdata { + unsigned int flags; + unsigned int clk_delay_cycles; + unsigned int clk_delay_sel; + bool clk_delay_enable; + unsigned int ext_cd_gpio; + bool ext_cd_gpio_invert; + unsigned int max_speed; + unsigned int host_caps; + unsigned int quirks; + unsigned int pm_caps; +}; + +struct sdhci_pxa { + u8 clk_enable; + u8 power_mode; +}; +#endif /* _PXA_SDHCI_H_ */ diff --git a/include/linux/plist.h b/include/linux/plist.h index c9b9f322c8d..aa0fb390bd2 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -77,14 +77,9 @@ #include <linux/kernel.h> #include <linux/list.h> -#include <linux/spinlock_types.h> struct plist_head { struct list_head node_list; -#ifdef CONFIG_DEBUG_PI_LIST - raw_spinlock_t *rawlock; - spinlock_t *spinlock; -#endif }; struct plist_node { @@ -93,37 +88,13 @@ struct plist_node { struct list_head node_list; }; -#ifdef CONFIG_DEBUG_PI_LIST -# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock -# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock -#else -# define PLIST_HEAD_LOCK_INIT(_lock) -# define PLIST_HEAD_LOCK_INIT_RAW(_lock) -#endif - -#define _PLIST_HEAD_INIT(head) \ - .node_list = LIST_HEAD_INIT((head).node_list) - /** * PLIST_HEAD_INIT - static struct plist_head initializer * @head: struct plist_head variable name - * @_lock: lock to initialize for this list - */ -#define PLIST_HEAD_INIT(head, _lock) \ -{ \ - _PLIST_HEAD_INIT(head), \ - PLIST_HEAD_LOCK_INIT(&(_lock)) \ -} - -/** - * PLIST_HEAD_INIT_RAW - static struct plist_head initializer - * @head: struct plist_head variable name - * @_lock: lock to initialize for this list */ -#define PLIST_HEAD_INIT_RAW(head, _lock) \ +#define PLIST_HEAD_INIT(head) \ { \ - _PLIST_HEAD_INIT(head), \ - PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \ + .node_list = LIST_HEAD_INIT((head).node_list) \ } /** @@ -141,31 +112,11 @@ struct plist_node { /** * plist_head_init - dynamic struct plist_head initializer * @head: &struct plist_head pointer - * @lock: spinlock protecting the list (debugging) */ static inline void -plist_head_init(struct plist_head *head, spinlock_t *lock) +plist_head_init(struct plist_head *head) { INIT_LIST_HEAD(&head->node_list); -#ifdef CONFIG_DEBUG_PI_LIST - head->spinlock = lock; - head->rawlock = NULL; -#endif -} - -/** - * plist_head_init_raw - dynamic struct plist_head initializer - * @head: &struct plist_head pointer - * @lock: raw_spinlock protecting the list (debugging) - */ -static inline void -plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) -{ - INIT_LIST_HEAD(&head->node_list); -#ifdef CONFIG_DEBUG_PI_LIST - head->rawlock = lock; - head->spinlock = NULL; -#endif } /** diff --git a/include/linux/pm.h b/include/linux/pm.h index 411e4f4be52..f7c84c9abd3 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -461,8 +461,8 @@ struct dev_pm_info { unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; - void *subsys_data; /* Owned by the subsystem. */ #endif + void *subsys_data; /* Owned by the subsystem. */ }; extern void update_pm_runtime_accounting(struct device *dev); @@ -472,7 +472,7 @@ extern void update_pm_runtime_accounting(struct device *dev); * hibernation, system resume and during runtime PM transitions along with * subsystem-level and driver-level callbacks. 
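With the debug locking fields gone, plist initialization loses its lock argument; a short sketch of both the static and the dynamic form, using hypothetical names:

#include <linux/plist.h>

static struct plist_head foo_waiters = PLIST_HEAD_INIT(foo_waiters);

struct foo_queue {				/* hypothetical container */
	struct plist_head head;
};

static void foo_queue_init(struct foo_queue *q)
{
	plist_head_init(&q->head);		/* no spinlock pointer any more */
}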
*/ -struct dev_power_domain { +struct dev_pm_domain { struct dev_pm_ops ops; }; @@ -553,11 +553,17 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); extern int pm_generic_prepare(struct device *dev); +extern int pm_generic_suspend_noirq(struct device *dev); extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume_noirq(struct device *dev); extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze_noirq(struct device *dev); extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw_noirq(struct device *dev); extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore_noirq(struct device *dev); extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff_noirq(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h new file mode 100644 index 00000000000..21097cb086f --- /dev/null +++ b/include/linux/pm_domain.h @@ -0,0 +1,108 @@ +/* + * pm_domain.h - Definitions and headers related to device power domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#ifndef _LINUX_PM_DOMAIN_H +#define _LINUX_PM_DOMAIN_H + +#include <linux/device.h> + +enum gpd_status { + GPD_STATE_ACTIVE = 0, /* PM domain is active */ + GPD_STATE_BUSY, /* Something is happening to the PM domain */ + GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ + GPD_STATE_POWER_OFF, /* PM domain is off */ +}; + +struct dev_power_governor { + bool (*power_down_ok)(struct dev_pm_domain *domain); +}; + +struct generic_pm_domain { + struct dev_pm_domain domain; /* PM domain operations */ + struct list_head gpd_list_node; /* Node in the global PM domains list */ + struct list_head sd_node; /* Node in the parent's subdomain list */ + struct generic_pm_domain *parent; /* Parent PM domain */ + struct list_head sd_list; /* List of dubdomains */ + struct list_head dev_list; /* List of devices */ + struct mutex lock; + struct dev_power_governor *gov; + struct work_struct power_off_work; + unsigned int in_progress; /* Number of devices being suspended now */ + unsigned int sd_count; /* Number of subdomains with power "on" */ + enum gpd_status status; /* Current state of the domain */ + wait_queue_head_t status_wait_queue; + struct task_struct *poweroff_task; /* Powering off task */ + unsigned int resume_count; /* Number of devices being resumed */ + unsigned int device_count; /* Number of devices */ + unsigned int suspended_count; /* System suspend device counter */ + unsigned int prepared_count; /* Suspend counter of prepared devices */ + bool suspend_power_off; /* Power status before system suspend */ + int (*power_off)(struct generic_pm_domain *domain); + int (*power_on)(struct generic_pm_domain *domain); + int (*start_device)(struct device *dev); + int (*stop_device)(struct device *dev); + bool (*active_wakeup)(struct device *dev); +}; + +static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) +{ + return container_of(pd, struct generic_pm_domain, domain); +} + +struct dev_list_entry { + struct list_head node; + struct device *dev; + bool need_restore; +}; + +#ifdef CONFIG_PM_GENERIC_DOMAINS +extern int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device 
*dev); +extern int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev); +extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_subdomain); +extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target); +extern void pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off); +extern int pm_genpd_poweron(struct generic_pm_domain *genpd); +extern void pm_genpd_poweroff_unused(void); +extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); +#else +static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + return -ENOSYS; +} +static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *new_sd) +{ + return -ENOSYS; +} +static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *target) +{ + return -ENOSYS; +} +static inline void pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) {} +static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) +{ + return -ENOSYS; +} +static inline void pm_genpd_poweroff_unused(void) {} +static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} +#endif + +#endif /* _LINUX_PM_DOMAIN_H */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 878cf84baeb..daac05d751b 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -82,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev) && !dev->power.disable_depth; } +static inline bool pm_runtime_status_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED; +} + static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; @@ -130,6 +135,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {} static inline bool device_run_wake(struct device *dev) { return false; } static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } @@ -247,41 +253,41 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) struct pm_clk_notifier_block { struct notifier_block nb; - struct dev_power_domain *pwr_domain; + struct dev_pm_domain *pm_domain; char *con_ids[]; }; -#ifdef CONFIG_PM_RUNTIME_CLK -extern int pm_runtime_clk_init(struct device *dev); -extern void pm_runtime_clk_destroy(struct device *dev); -extern int pm_runtime_clk_add(struct device *dev, const char *con_id); -extern void pm_runtime_clk_remove(struct device *dev, const char *con_id); -extern int pm_runtime_clk_suspend(struct device *dev); -extern int pm_runtime_clk_resume(struct device *dev); +#ifdef CONFIG_PM_CLK +extern int pm_clk_init(struct device *dev); +extern void pm_clk_destroy(struct device *dev); +extern int pm_clk_add(struct device *dev, const char *con_id); +extern void pm_clk_remove(struct device *dev, const char *con_id); +extern int pm_clk_suspend(struct device *dev); +extern int pm_clk_resume(struct device 
*dev); #else -static inline int pm_runtime_clk_init(struct device *dev) +static inline int pm_clk_init(struct device *dev) { return -EINVAL; } -static inline void pm_runtime_clk_destroy(struct device *dev) +static inline void pm_clk_destroy(struct device *dev) { } -static inline int pm_runtime_clk_add(struct device *dev, const char *con_id) +static inline int pm_clk_add(struct device *dev, const char *con_id) { return -EINVAL; } -static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id) +static inline void pm_clk_remove(struct device *dev, const char *con_id) { } -#define pm_runtime_clock_suspend NULL -#define pm_runtime_clock_resume NULL +#define pm_clk_suspend NULL +#define pm_clk_resume NULL #endif #ifdef CONFIG_HAVE_CLK -extern void pm_runtime_clk_add_notifier(struct bus_type *bus, +extern void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb); #else -static inline void pm_runtime_clk_add_notifier(struct bus_type *bus, +static inline void pm_clk_add_notifier(struct bus_type *bus, struct pm_clk_notifier_block *clknb) { } diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 2e681d9555b..58969b2a8a8 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -27,6 +27,21 @@ asmlinkage void preempt_schedule(void); +#define preempt_check_resched() \ +do { \ + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ + preempt_schedule(); \ +} while (0) + +#else /* !CONFIG_PREEMPT */ + +#define preempt_check_resched() do { } while (0) + +#endif /* CONFIG_PREEMPT */ + + +#ifdef CONFIG_PREEMPT_COUNT + #define preempt_disable() \ do { \ inc_preempt_count(); \ @@ -39,12 +54,6 @@ do { \ dec_preempt_count(); \ } while (0) -#define preempt_check_resched() \ -do { \ - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ - preempt_schedule(); \ -} while (0) - #define preempt_enable() \ do { \ preempt_enable_no_resched(); \ @@ -80,18 +89,17 @@ do { \ preempt_check_resched(); \ } while (0) -#else +#else /* !CONFIG_PREEMPT_COUNT */ #define preempt_disable() do { } while (0) #define preempt_enable_no_resched() do { } while (0) #define preempt_enable() do { } while (0) -#define preempt_check_resched() do { } while (0) #define preempt_disable_notrace() do { } while (0) #define preempt_enable_no_resched_notrace() do { } while (0) #define preempt_enable_notrace() do { } while (0) -#endif +#endif /* CONFIG_PREEMPT_COUNT */ #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 9178d5cc0b0..800f113bea6 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -47,6 +47,13 @@ #define PTRACE_GETREGSET 0x4204 #define PTRACE_SETREGSET 0x4205 +#define PTRACE_SEIZE 0x4206 +#define PTRACE_INTERRUPT 0x4207 +#define PTRACE_LISTEN 0x4208 + +/* flags in @data for PTRACE_SEIZE */ +#define PTRACE_SEIZE_DEVEL 0x80000000 /* temp flag for development */ + /* options set using PTRACE_SETOPTIONS */ #define PTRACE_O_TRACESYSGOOD 0x00000001 #define PTRACE_O_TRACEFORK 0x00000002 @@ -65,6 +72,7 @@ #define PTRACE_EVENT_EXEC 4 #define PTRACE_EVENT_VFORK_DONE 5 #define PTRACE_EVENT_EXIT 6 +#define PTRACE_EVENT_STOP 7 #include <asm/ptrace.h> @@ -77,16 +85,22 @@ * flags. When the a task is stopped the ptracer owns task->ptrace. 
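A hedged userspace sketch of the new SEIZE/INTERRUPT requests defined above; the numeric fallbacks mirror the header values, and PTRACE_SEIZE_DEVEL is passed on the assumption that it is required while the interface carries the development flag:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SEIZE				/* libc headers may not carry these yet */
#define PTRACE_SEIZE		0x4206
#define PTRACE_INTERRUPT	0x4207
#define PTRACE_SEIZE_DEVEL	0x80000000
#endif

/* attach without stopping the tracee, then ask it to trap; pid is assumed valid */
static int seize_and_interrupt(pid_t pid)
{
	int status;

	if (ptrace(PTRACE_SEIZE, pid, NULL,
		   (void *)(unsigned long)PTRACE_SEIZE_DEVEL) < 0)
		return -1;
	if (ptrace(PTRACE_INTERRUPT, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, &status, 0) < 0)
		return -1;
	return status;
}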
*/ +#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ #define PT_PTRACED 0x00000001 #define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ #define PT_TRACESYSGOOD 0x00000004 #define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */ -#define PT_TRACE_FORK 0x00000010 -#define PT_TRACE_VFORK 0x00000020 -#define PT_TRACE_CLONE 0x00000040 -#define PT_TRACE_EXEC 0x00000080 -#define PT_TRACE_VFORK_DONE 0x00000100 -#define PT_TRACE_EXIT 0x00000200 + +/* PT_TRACE_* event enable flags */ +#define PT_EVENT_FLAG_SHIFT 4 +#define PT_EVENT_FLAG(event) (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1)) + +#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK) +#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK) +#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE) +#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC) +#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE) +#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT) #define PT_TRACE_MASK 0x000003f4 @@ -105,7 +119,7 @@ extern long arch_ptrace(struct task_struct *child, long request, extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern void ptrace_disable(struct task_struct *); -extern int ptrace_check_attach(struct task_struct *task, int kill); +extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern void ptrace_notify(int exit_code); @@ -122,7 +136,7 @@ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); static inline int ptrace_reparented(struct task_struct *child) { - return child->real_parent != child->parent; + return !same_thread_group(child->real_parent, child->parent); } static inline void ptrace_unlink(struct task_struct *child) @@ -137,36 +151,56 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data); /** - * task_ptrace - return %PT_* flags that apply to a task - * @task: pointer to &task_struct in question + * ptrace_parent - return the task that is tracing the given task + * @task: task to consider * - * Returns the %PT_* flags that apply to @task. + * Returns %NULL if no one is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. During exec, this may be called with task_lock() held + * on @task, still held from when check_unsafe_exec() was called. */ -static inline int task_ptrace(struct task_struct *task) +static inline struct task_struct *ptrace_parent(struct task_struct *task) { - return task->ptrace; + if (unlikely(task->ptrace)) + return rcu_dereference(task->parent); + return NULL; +} + +/** + * ptrace_event_enabled - test whether a ptrace event is enabled + * @task: ptracee of interest + * @event: %PTRACE_EVENT_* to test + * + * Test whether @event is enabled for ptracee @task. + * + * Returns %true if @event is enabled, %false otherwise. 
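A kernel-side sketch of ptrace_parent(), which per its kernel-doc above must be called under rcu_read_lock(); the surrounding function and its use of the result are hypothetical:

#include <linux/ptrace.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static pid_t foo_tracer_pid(struct task_struct *task)	/* hypothetical helper */
{
	struct task_struct *tracer;
	pid_t pid = 0;

	rcu_read_lock();		/* ptrace_parent() may only be used under RCU */
	tracer = ptrace_parent(task);
	if (tracer)
		pid = task_pid_nr(tracer);
	rcu_read_unlock();

	return pid;
}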
+ */ +static inline bool ptrace_event_enabled(struct task_struct *task, int event) +{ + return task->ptrace & PT_EVENT_FLAG(event); } /** * ptrace_event - possibly stop for a ptrace event notification - * @mask: %PT_* bit to check in @current->ptrace - * @event: %PTRACE_EVENT_* value to report if @mask is set + * @event: %PTRACE_EVENT_* value to report * @message: value for %PTRACE_GETEVENTMSG to return * - * This checks the @mask bit to see if ptrace wants stops for this event. - * If so we stop, reporting @event and @message to the ptrace parent. - * - * Returns nonzero if we did a ptrace notification, zero if not. + * Check whether @event is enabled and, if so, report @event and @message + * to the ptrace parent. * * Called without locks. */ -static inline int ptrace_event(int mask, int event, unsigned long message) +static inline void ptrace_event(int event, unsigned long message) { - if (mask && likely(!(current->ptrace & mask))) - return 0; - current->ptrace_message = message; - ptrace_notify((event << 8) | SIGTRAP); - return 1; + if (unlikely(ptrace_event_enabled(current, event))) { + current->ptrace_message = message; + ptrace_notify((event << 8) | SIGTRAP); + } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) { + /* legacy EXEC report via SIGTRAP */ + send_sig(SIGTRAP, current, 0); + } } /** @@ -183,16 +217,24 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) { INIT_LIST_HEAD(&child->ptrace_entry); INIT_LIST_HEAD(&child->ptraced); - child->parent = child->real_parent; +#ifdef CONFIG_HAVE_HW_BREAKPOINT + atomic_set(&child->ptrace_bp_refcnt, 1); +#endif + child->jobctl = 0; child->ptrace = 0; - if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) { + child->parent = child->real_parent; + + if (unlikely(ptrace) && current->ptrace) { child->ptrace = current->ptrace; __ptrace_link(child, current->parent); - } -#ifdef CONFIG_HAVE_HW_BREAKPOINT - atomic_set(&child->ptrace_bp_refcnt, 1); -#endif + if (child->ptrace & PT_SEIZED) + task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); + else + sigaddset(&child->pending.signal, SIGSTOP); + + set_tsk_thread_flag(child, TIF_SIGPENDING); + } } /** diff --git a/include/linux/random.h b/include/linux/random.h index fb7ab9de5f3..ce29a040c8d 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -58,6 +58,7 @@ extern void get_random_bytes(void *buf, int nbytes); void generate_random_uuid(unsigned char uuid_out[16]); extern __u32 secure_ip_id(__be32 daddr); +extern __u32 secure_ipv6_id(const __be32 daddr[4]); extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e3beb315517..d079290843a 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -183,7 +183,7 @@ static inline void list_splice_init_rcu(struct list_head *list, struct list_head *last = list->prev; struct list_head *at = head->next; - if (list_empty(head)) + if (list_empty(list)) return; /* "first" and "last" tracking list, so initialize it. */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 99f9aa7c280..8f4f881a0ad 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -239,7 +239,7 @@ extern int rcu_read_lock_bh_held(void); * Check debug_lockdep_rcu_enabled() to prevent false positives during boot * and while lockdep is disabled. 
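A sketch of the kind of caller this rcupdate.h change is aimed at: once the check depends on CONFIG_PREEMPT_COUNT rather than full CONFIG_PREEMPT, the preempt_count()-based test below works whenever preempt counting is enabled; struct foo and global_foo are placeholders:

#include <linux/kernel.h>
#include <linux/rcupdate.h>

struct foo;					/* placeholder type */
static struct foo __rcu *global_foo;		/* hypothetical RCU-sched protected pointer */

static struct foo *get_foo(void)
{
	/* lockdep-style sanity check in an RCU-sched read path */
	WARN_ON_ONCE(!rcu_read_lock_sched_held());
	return rcu_dereference_sched(global_foo);
}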
*/ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT static inline int rcu_read_lock_sched_held(void) { int lockdep_opinion = 0; @@ -250,12 +250,12 @@ static inline int rcu_read_lock_sched_held(void) lockdep_opinion = lock_is_held(&rcu_sched_lock_map); return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); } -#else /* #ifdef CONFIG_PREEMPT */ +#else /* #ifdef CONFIG_PREEMPT_COUNT */ static inline int rcu_read_lock_sched_held(void) { return 1; } -#endif /* #else #ifdef CONFIG_PREEMPT */ +#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -276,17 +276,17 @@ static inline int rcu_read_lock_bh_held(void) return 1; } -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT static inline int rcu_read_lock_sched_held(void) { return preempt_count() != 0 || irqs_disabled(); } -#else /* #ifdef CONFIG_PREEMPT */ +#else /* #ifdef CONFIG_PREEMPT_COUNT */ static inline int rcu_read_lock_sched_held(void) { return 1; } -#endif /* #else #ifdef CONFIG_PREEMPT */ +#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h new file mode 100644 index 00000000000..60a65cd7e1a --- /dev/null +++ b/include/linux/regmap.h @@ -0,0 +1,82 @@ +#ifndef __LINUX_REGMAP_H +#define __LINUX_REGMAP_H + +/* + * Register map access API + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/device.h> +#include <linux/list.h> +#include <linux/module.h> + +struct i2c_client; +struct spi_device; + +struct regmap_config { + int reg_bits; + int val_bits; +}; + +typedef int (*regmap_hw_write)(struct device *dev, const void *data, + size_t count); +typedef int (*regmap_hw_gather_write)(struct device *dev, + const void *reg, size_t reg_len, + const void *val, size_t val_len); +typedef int (*regmap_hw_read)(struct device *dev, + const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); + +/** + * Description of a hardware bus for the register map infrastructure. + * + * @list: Internal use. + * @type: Bus type, used to identify bus to be used for a device. + * @write: Write operation. + * @gather_write: Write operation with split register/value, return -ENOTSUPP + * if not implemented on a given device. + * @read: Read operation. Data is returned in the buffer used to transmit + * data. + * @owner: Module with the bus implementation, used to pin the implementation + * in memory. + * @read_flag_mask: Mask to be set in the top byte of the register when doing + * a read. 
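A minimal sketch of the new register map API against a hypothetical I2C device with 8-bit registers; the register addresses, masks and the ERR_PTR-style error handling are assumptions for illustration:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_setup(struct i2c_client *i2c)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	ret = regmap_read(map, 0x00, &val);			/* hypothetical ID register */
	if (!ret)
		ret = regmap_update_bits(map, 0x01, 0x0f, 0x03);	/* hypothetical field */

	regmap_exit(map);
	return ret;
}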
+ */ +struct regmap_bus { + struct list_head list; + struct bus_type *type; + regmap_hw_write write; + regmap_hw_gather_write gather_write; + regmap_hw_read read; + struct module *owner; + u8 read_flag_mask; +}; + +struct regmap *regmap_init(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config); +struct regmap *regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config); +struct regmap *regmap_init_spi(struct spi_device *dev, + const struct regmap_config *config); + +void regmap_exit(struct regmap *map); +int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); +int regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len); +int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); +int regmap_raw_read(struct regmap *map, unsigned int reg, + void *val, size_t val_len); +int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, + size_t val_count); +int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val); + +#endif diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 6deef5dc95f..57958c0e1d3 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -41,10 +41,11 @@ int reiserfs_xattr_init(struct super_block *sb, int mount_flags); int reiserfs_lookup_privroot(struct super_block *sb); int reiserfs_delete_xattrs(struct inode *inode); int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); -int reiserfs_permission(struct inode *inode, int mask, unsigned int flags); +int reiserfs_permission(struct inode *inode, int mask); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) +int reiserfs_check_acl(struct inode *inode, int mask); ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); int reiserfs_setxattr(struct dentry *dentry, const char *name, @@ -122,6 +123,7 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode) #define reiserfs_setxattr NULL #define reiserfs_listxattr NULL #define reiserfs_removexattr NULL +#define reiserfs_check_acl NULL static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index ab38ac80b0f..b891de96000 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -169,7 +169,7 @@ void ring_buffer_set_clock(struct ring_buffer *buffer, size_t ring_buffer_page_len(void *page); -void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); +void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, size_t len, int cpu, int full); diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 8d522ffeda3..de17134244f 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -66,7 +66,7 @@ struct hrtimer_sleeper; #define __RT_MUTEX_INITIALIZER(mutexname) \ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \ + , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ , .owner = NULL \ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} @@ -100,7 +100,7 @@ extern void rt_mutex_unlock(struct rt_mutex *lock); #ifdef CONFIG_RT_MUTEXES # define INIT_RT_MUTEXES(tsk) \ - .pi_waiters = 
PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \ + .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \ INIT_RT_MUTEX_DEBUG(tsk) #else # define INIT_RT_MUTEXES(tsk) diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index bbad657a372..c81226a9a35 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -585,6 +585,8 @@ enum rtnetlink_groups { #define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_ROUTE, #define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE + RTNLGRP_DCB, +#define RTNLGRP_DCB RTNLGRP_DCB __RTNLGRP_MAX }; #define RTNLGRP_MAX (__RTNLGRP_MAX - 1) diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index a8afe9cd000..77950dfa0a9 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -124,19 +124,9 @@ extern void downgrade_write(struct rw_semaphore *sem); */ extern void down_read_nested(struct rw_semaphore *sem, int subclass); extern void down_write_nested(struct rw_semaphore *sem, int subclass); -/* - * Take/release a lock when not the owner will release it. - * - * [ This API should be avoided as much as possible - the - * proper abstraction for this case is completions. ] - */ -extern void down_read_non_owner(struct rw_semaphore *sem); -extern void up_read_non_owner(struct rw_semaphore *sem); #else # define down_read_nested(sem, subclass) down_read(sem) # define down_write_nested(sem, subclass) down_write(sem) -# define down_read_non_owner(sem) down_read(sem) -# define up_read_non_owner(sem) up_read(sem) #endif #endif /* _LINUX_RWSEM_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 496770a9648..20b03bf9474 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -844,6 +844,7 @@ enum cpu_idle_type { #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ enum powersavings_balance_level { POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ @@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void) return 0; } -struct sched_group { - struct sched_group *next; /* Must be a circular list */ +struct sched_group_power { atomic_t ref; - /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a * single CPU. */ - unsigned int cpu_power, cpu_power_orig; + unsigned int power, power_orig; +}; + +struct sched_group { + struct sched_group *next; /* Must be a circular list */ + atomic_t ref; + unsigned int group_weight; + struct sched_group_power *sgp; /* * The CPUs this group covers. @@ -1254,6 +1260,9 @@ struct task_struct { #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; char rcu_read_unlock_special; +#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) + int rcu_boosted; +#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */ struct list_head rcu_node_entry; #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TREE_PREEMPT_RCU @@ -1283,7 +1292,7 @@ struct task_struct { int exit_state; int exit_code, exit_signal; int pdeath_signal; /* The signal sent when the parent dies */ - unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ + unsigned int jobctl; /* JOBCTL_*, siglock protected */ /* ??? 
*/ unsigned int personality; unsigned did_exec:1; @@ -1503,7 +1512,6 @@ struct task_struct { short il_next; short pref_node_fork; #endif - atomic_t fs_excl; /* holding fs exclusive resources */ struct rcu_head rcu; /* @@ -1804,15 +1812,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define used_math() tsk_used_math(current) /* - * task->group_stop flags + * task->jobctl flags */ -#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ -#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ -#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ -#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ -#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ - -extern void task_clear_group_stop_pending(struct task_struct *task); +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ + +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */ +#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ +#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ + +#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT) +#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT) +#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) +#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT) + +#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) + +extern bool task_set_jobctl_pending(struct task_struct *task, + unsigned int mask); +extern void task_clear_jobctl_trapping(struct task_struct *task); +extern void task_clear_jobctl_pending(struct task_struct *task, + unsigned int mask); #ifdef CONFIG_PREEMPT_RCU @@ -2127,7 +2154,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s spin_unlock_irqrestore(&tsk->sighand->siglock, flags); return ret; -} +} extern void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask); @@ -2142,7 +2169,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_ extern int kill_pgrp(struct pid *pid, int sig, int priv); extern int kill_pid(struct pid *pid, int sig, int priv); extern int kill_proc_info(int, struct siginfo *, pid_t); -extern int do_notify_parent(struct task_struct *, int); +extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int, struct task_struct *); extern int send_sig(int, struct task_struct *, int); @@ -2266,8 +2293,10 @@ static inline int get_nr_threads(struct task_struct *tsk) return tsk->signal->nr_threads; } -/* de_thread depends on thread_group_leader not being a pid based check */ -#define thread_group_leader(p) (p == p->group_leader) +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} /* Do to the insanities of de_thread it is possible for a process * to have the pid of the thread group leader without actually being @@ -2300,11 +2329,6 @@ static inline int 
thread_group_empty(struct task_struct *p) #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) -static inline int task_detached(struct task_struct *p) -{ - return p->exit_signal == -1; -} - /* * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring * subscriptions and synchronises with wait4(). Also used in procfs. Also @@ -2501,7 +2525,7 @@ extern int _cond_resched(void); extern int __cond_resched_lock(spinlock_t *lock); -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPT_COUNT #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET #else #define PREEMPT_LOCK_OFFSET 0 diff --git a/include/linux/sdla.h b/include/linux/sdla.h index 564acd3a71c..9995c7fc3f6 100644 --- a/include/linux/sdla.h +++ b/include/linux/sdla.h @@ -112,11 +112,7 @@ struct sdla_dlci_conf { short Tb_max; }; -#ifndef __KERNEL__ - -void sdla(void *cfg_info, char *dev, struct frad_conf *conf, int quiet); - -#else +#ifdef __KERNEL__ /* important Z80 window addresses */ #define SDLA_CONTROL_WND 0xE000 diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 167c33361d9..cc7a4e9cc7a 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -19,6 +19,11 @@ static inline void secure_computing(int this_syscall) extern long prctl_get_seccomp(void); extern long prctl_set_seccomp(unsigned long); +static inline int seccomp_mode(seccomp_t *s) +{ + return s->mode; +} + #else /* CONFIG_SECCOMP */ #include <linux/errno.h> @@ -37,6 +42,11 @@ static inline long prctl_set_seccomp(unsigned long arg2) return -EINVAL; } +static inline int seccomp_mode(seccomp_t *s) +{ + return 0; +} + #endif /* CONFIG_SECCOMP */ #endif /* _LINUX_SECCOMP_H */ diff --git a/include/linux/security.h b/include/linux/security.h index 8ce59ef3e5a..ebd2a53a3d0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1456,7 +1456,7 @@ struct security_operations { struct inode *new_dir, struct dentry *new_dentry); int (*inode_readlink) (struct dentry *dentry); int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); - int (*inode_permission) (struct inode *inode, int mask, unsigned flags); + int (*inode_permission) (struct inode *inode, int mask); int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); int (*inode_setxattr) (struct dentry *dentry, const char *name, @@ -1720,7 +1720,6 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); int security_inode_permission(struct inode *inode, int mask); -int security_inode_exec_permission(struct inode *inode, unsigned int flags); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); int security_inode_setxattr(struct dentry *dentry, const char *name, @@ -2113,12 +2112,6 @@ static inline int security_inode_permission(struct inode *inode, int mask) return 0; } -static inline int security_inode_exec_permission(struct inode *inode, - unsigned int flags) -{ - return 0; -} - static inline int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 03c0232b416..be720cd2038 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -23,6 +23,7 @@ struct seq_file { u64 version; struct mutex lock; const struct seq_operations *op; + int poll_event; void 
*private; }; diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h new file mode 100644 index 00000000000..790651b4e5b --- /dev/null +++ b/include/linux/shrinker.h @@ -0,0 +1,42 @@ +#ifndef _LINUX_SHRINKER_H +#define _LINUX_SHRINKER_H + +/* + * This struct is used to pass information from page reclaim to the shrinkers. + * We consolidate the values for easier extention later. + */ +struct shrink_control { + gfp_t gfp_mask; + + /* How many slab objects shrinker() should scan and try to reclaim */ + unsigned long nr_to_scan; +}; + +/* + * A callback you can register to apply pressure to ageable caches. + * + * 'sc' is passed shrink_control which includes a count 'nr_to_scan' + * and a 'gfpmask'. It should look through the least-recently-used + * 'nr_to_scan' entries and attempt to free them up. It should return + * the number of objects which remain in the cache. If it returns -1, it means + * it cannot do any scanning at this time (eg. there is a risk of deadlock). + * + * The 'gfpmask' refers to the allocation we are currently trying to + * fulfil. + * + * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is + * querying the cache size, so a fastpath for that case is appropriate. + */ +struct shrinker { + int (*shrink)(struct shrinker *, struct shrink_control *sc); + int seeks; /* seeks to recreate an obj */ + long batch; /* reclaim batch size, 0 = default */ + + /* These are for internal use */ + struct list_head list; + long nr; /* objs pending delete */ +}; +#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ +extern void register_shrinker(struct shrinker *); +extern void unregister_shrinker(struct shrinker *); +#endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c0a4f3ab0cc..a24218c9c84 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -187,6 +187,20 @@ enum { /* ensure the originating sk reference is available on driver level */ SKBTX_DRV_NEEDS_SK_REF = 1 << 3, + + /* device driver supports TX zero-copy buffers */ + SKBTX_DEV_ZEROCOPY = 1 << 4, +}; + +/* + * The callback notifies userspace to release buffers when skb DMA is done in + * lower device, the skb last reference should be 0 when calling this. + * The desc is used to track userspace buffer index. + */ +struct ubuf_info { + void (*callback)(void *); + void *arg; + unsigned long desc; }; /* This data is invariant across clones and lives at @@ -211,6 +225,7 @@ struct skb_shared_info { /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ void * destructor_arg; + /* must be last field, see pskb_expand_head() */ skb_frag_t frags[MAX_SKB_FRAGS]; }; @@ -270,15 +285,12 @@ typedef unsigned char *sk_buff_data_t; * struct sk_buff - socket buffer * @next: Next buffer in list * @prev: Previous buffer in list - * @sk: Socket we are owned by * @tstamp: Time we arrived + * @sk: Socket we are owned by * @dev: Device we arrived on/are leaving by - * @transport_header: Transport layer header - * @network_header: Network layer header - * @mac_header: Link layer header + * @cb: Control buffer. Free for use by every layer. Put private vars here * @_skb_refdst: destination entry (with norefcount bit) * @sp: the security path, used for xfrm - * @cb: Control buffer. Free for use by every layer. 
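A minimal sketch of the relocated shrinker interface; the foo_cache_* names and counting are placeholders, with the return convention taken from the comment above:

#include <linux/shrinker.h>

static int foo_cache_count;			/* objects currently cached (placeholder) */

static int foo_cache_evict(int nr)		/* placeholder eviction routine */
{
	return 0;				/* number of objects actually freed */
}

static int foo_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (sc->nr_to_scan)
		foo_cache_count -= foo_cache_evict(sc->nr_to_scan);
	return foo_cache_count;			/* remaining objects; -1 if unable to scan */
}

static struct shrinker foo_shrinker = {
	.shrink	= foo_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* registered/unregistered at module init/exit: register_shrinker(&foo_shrinker); */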
Put private vars here * @len: Length of actual data * @data_len: Data length * @mac_len: Length of link layer header @@ -286,40 +298,45 @@ typedef unsigned char *sk_buff_data_t; * @csum: Checksum (must include start/offset pair) * @csum_start: Offset from skb->head where checksumming should start * @csum_offset: Offset from csum_start where checksum should be stored + * @priority: Packet queueing priority * @local_df: allow local fragmentation * @cloned: Head may be cloned (check refcnt to be sure) + * @ip_summed: Driver fed us an IP checksum * @nohdr: Payload reference only, must not modify header + * @nfctinfo: Relationship of this skb to the connection * @pkt_type: Packet class * @fclone: skbuff clone status - * @ip_summed: Driver fed us an IP checksum - * @priority: Packet queueing priority - * @users: User count - see {datagram,tcp}.c - * @protocol: Packet protocol from driver - * @truesize: Buffer size - * @head: Head of buffer - * @data: Data head pointer - * @tail: Tail pointer - * @end: End pointer - * @destructor: Destruct function - * @mark: Generic packet mark - * @nfct: Associated connection, if any * @ipvs_property: skbuff is owned by ipvs * @peeked: this packet has been seen already, so stats have been * done for it, don't do them again * @nf_trace: netfilter packet trace flag - * @nfctinfo: Relationship of this skb to the connection + * @protocol: Packet protocol from driver + * @destructor: Destruct function + * @nfct: Associated connection, if any * @nfct_reasm: netfilter conntrack re-assembly pointer * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @skb_iif: ifindex of device we arrived on - * @rxhash: the packet hash computed on receive - * @queue_mapping: Queue mapping for multiqueue devices * @tc_index: Traffic control index * @tc_verd: traffic control verdict + * @rxhash: the packet hash computed on receive + * @queue_mapping: Queue mapping for multiqueue devices * @ndisc_nodetype: router type (from link layer) + * @ooo_okay: allow the mapping of a socket to a queue to be changed * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking + * @mark: Generic packet mark + * @dropcount: total number of sk_receive_queue overflows * @vlan_tci: vlan tag control information + * @transport_header: Transport layer header + * @network_header: Network layer header + * @mac_header: Link layer header + * @tail: Tail pointer + * @end: End pointer + * @head: Head of buffer + * @data: Data head pointer + * @truesize: Buffer size + * @users: User count - see {datagram,tcp}.c */ struct sk_buff { @@ -1562,16 +1579,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, return __netdev_alloc_skb(dev, length, GFP_ATOMIC); } -static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, - unsigned int length) +static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length, gfp_t gfp) { - struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN); + struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); if (NET_IP_ALIGN && skb) skb_reserve(skb, NET_IP_ALIGN); return skb; } +static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); +} + /** * __netdev_alloc_page - allocate a page for ps-rx on a specific device * @dev: network device to receive on @@ -2028,8 +2051,7 @@ static inline void 
sw_tx_timestamp(struct sk_buff *skb) * skb_tx_timestamp() - Driver hook for transmit timestamping * * Ethernet MAC Drivers should call this function in their hard_xmit() - * function as soon as possible after giving the sk_buff to the MAC - * hardware, but before freeing the sk_buff. + * function immediately before giving the sk_buff to the MAC hardware. * * @skb: A socket buffer. */ @@ -2266,5 +2288,6 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb) } bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index ad4dd1c8d30..573c809c33d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -134,6 +134,26 @@ unsigned int kmem_cache_size(struct kmem_cache *); #define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT) /* + * Some archs want to perform DMA into kmalloc caches and need a guaranteed + * alignment larger than the alignment of a 64-bit integer. + * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that. + */ +#ifdef ARCH_DMA_MINALIGN +#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN +#else +#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) +#endif + +/* + * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. + * Intended for arches that get misalignment faults even for 64 bit integer + * aligned buffers. + */ +#ifndef ARCH_SLAB_MINALIGN +#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) +#endif + +/* * Common kmalloc functions provided by all allocators */ void * __must_check __krealloc(const void *, size_t, gfp_t); diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 83203ae9390..d00e0bacda9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -18,53 +18,25 @@ #include <trace/events/kmem.h> /* - * Enforce a minimum alignment for the kmalloc caches. - * Usually, the kmalloc caches are cache_line_size() aligned, except when - * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. - * Some archs want to perform DMA into kmalloc caches and need a guaranteed - * alignment larger than the alignment of a 64-bit integer. - * ARCH_KMALLOC_MINALIGN allows that. - * Note that increasing this value may disable some debug features. - */ -#ifdef ARCH_DMA_MINALIGN -#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN -#else -#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) -#endif - -#ifndef ARCH_SLAB_MINALIGN -/* - * Enforce a minimum alignment for all caches. - * Intended for archs that get misalignment faults even for BYTES_PER_WORD - * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. - * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables - * some debug features. - */ -#define ARCH_SLAB_MINALIGN 0 -#endif - -/* * struct kmem_cache * * manages a cache. */ struct kmem_cache { -/* 1) per-cpu data, touched during every alloc/free */ - struct array_cache *array[NR_CPUS]; -/* 2) Cache tunables. Protected by cache_chain_mutex */ +/* 1) Cache tunables. 
Protected by cache_chain_mutex */ unsigned int batchcount; unsigned int limit; unsigned int shared; unsigned int buffer_size; u32 reciprocal_buffer_size; -/* 3) touched by every alloc & free from the backend */ +/* 2) touched by every alloc & free from the backend */ unsigned int flags; /* constant flags */ unsigned int num; /* # of objs per slab */ -/* 4) cache_grow/shrink */ +/* 3) cache_grow/shrink */ /* order of pgs per slab (2^n) */ unsigned int gfporder; @@ -80,11 +52,11 @@ struct kmem_cache { /* constructor func */ void (*ctor)(void *obj); -/* 5) cache creation/removal */ +/* 4) cache creation/removal */ const char *name; struct list_head next; -/* 6) statistics */ +/* 5) statistics */ #ifdef CONFIG_DEBUG_SLAB unsigned long num_active; unsigned long num_allocations; @@ -111,16 +83,18 @@ struct kmem_cache { int obj_size; #endif /* CONFIG_DEBUG_SLAB */ +/* 6) per-cpu/per-node data, touched during every alloc/free */ /* - * We put nodelists[] at the end of kmem_cache, because we want to size - * this array to nr_node_ids slots instead of MAX_NUMNODES + * We put array[] at the end of kmem_cache, because we want to size + * this array to nr_cpu_ids slots instead of NR_CPUS * (see kmem_cache_init()) - * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache - * is statically defined, so we reserve the max number of nodes. + * We still use [NR_CPUS] and not [1] or [0] because cache_cache + * is statically defined, so we reserve the max number of cpus. */ - struct kmem_list3 *nodelists[MAX_NUMNODES]; + struct kmem_list3 **nodelists; + struct array_cache *array[NR_CPUS]; /* - * Do not add fields after nodelists[] + * Do not add fields after array[] */ }; diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 4382db09df4..0ec00b39d00 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h @@ -1,16 +1,6 @@ #ifndef __LINUX_SLOB_DEF_H #define __LINUX_SLOB_DEF_H -#ifdef ARCH_DMA_MINALIGN -#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN -#else -#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long) -#endif - -#ifndef ARCH_SLAB_MINALIGN -#define ARCH_SLAB_MINALIGN __alignof__(unsigned long) -#endif - void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index c8668d161dd..4b35c06dfbc 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -113,16 +113,6 @@ struct kmem_cache { #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) -#ifdef ARCH_DMA_MINALIGN -#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN -#else -#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) -#endif - -#ifndef ARCH_SLAB_MINALIGN -#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) -#endif - /* * Maximum kmalloc object size handled by SLUB. Larger object allocations * are passed through to the page allocator. The page allocator "fastpath" @@ -228,6 +218,19 @@ kmalloc_order(size_t size, gfp_t flags, unsigned int order) return ret; } +/** + * Calling this on allocated memory will check that the memory + * is expected to be in use, and print warnings if not. 
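For context on the SLUB helper declared in the hunk just below: a minimal sketch of how a caller might use verify_mem_not_deleted(). The "mydrv" names are invented for illustration, the helper only exists in the SLUB definitions, and with CONFIG_SLUB_DEBUG disabled it is the stub shown in the patch and always returns true.

	#include <linux/kernel.h>
	#include <linux/slab.h>

	/* Sketch only: warn if an object we still hold a pointer to no longer
	 * looks allocated to SLUB (e.g. a suspected use-after-free). */
	static void mydrv_check_ctx(const void *ctx)
	{
		if (!verify_mem_not_deleted(ctx))
			pr_warn("mydrv: object %p no longer looks allocated\n", ctx);
	}
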
+ */ +#ifdef CONFIG_SLUB_DEBUG +extern bool verify_mem_not_deleted(const void *x); +#else +static inline bool verify_mem_not_deleted(const void *x) +{ + return true; +} +#endif + #ifdef CONFIG_TRACING extern void * kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size); diff --git a/include/linux/socket.h b/include/linux/socket.h index 4ef98e422fd..e17f8226663 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -192,7 +192,8 @@ struct ucred { #define AF_IEEE802154 36 /* IEEE802154 sockets */ #define AF_CAIF 37 /* CAIF sockets */ #define AF_ALG 38 /* Algorithm sockets */ -#define AF_MAX 39 /* For now.. */ +#define AF_NFC 39 /* NFC sockets */ +#define AF_MAX 40 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -234,6 +235,7 @@ struct ucred { #define PF_IEEE802154 AF_IEEE802154 #define PF_CAIF AF_CAIF #define PF_ALG AF_ALG +#define PF_NFC AF_NFC #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. */ diff --git a/include/linux/spi/74x164.h b/include/linux/spi/74x164.h index d85c52f294a..0aa6acc7331 100644 --- a/include/linux/spi/74x164.h +++ b/include/linux/spi/74x164.h @@ -1,8 +1,6 @@ #ifndef LINUX_SPI_74X164_H #define LINUX_SPI_74X164_H -#define GEN_74X164_DRIVER_NAME "74x164" - struct gen_74x164_chip_platform_data { /* number assigned to the first GPIO */ unsigned base; diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h index c42cff8ca19..2d676d5aaa8 100644 --- a/include/linux/spi/mcp23s08.h +++ b/include/linux/spi/mcp23s08.h @@ -22,13 +22,4 @@ struct mcp23s08_platform_data { * base to base+15 (or base+31 for s17 variant). */ unsigned base; - - void *context; /* param to setup/teardown */ - - int (*setup)(struct spi_device *spi, - int gpio, unsigned ngpio, - void *context); - int (*teardown)(struct spi_device *spi, - int gpio, unsigned ngpio, - void *context); }; diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 252e4482178..8623217f84d 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -27,6 +27,8 @@ struct ssb_sprom { u8 et1mdcport; /* MDIO for enet1 */ u8 board_rev; /* Board revision number from SPROM. 
*/ u8 country_code; /* Country Code */ + u16 leddc_on_time; /* LED Powersave Duty Cycle On Count */ + u16 leddc_off_time; /* LED Powersave Duty Cycle Off Count */ u8 ant_available_a; /* 2GHz antenna available bits (up to 4) */ u8 ant_available_bg; /* 5GHz antenna available bits (up to 4) */ u16 pa0b0; @@ -99,7 +101,7 @@ struct ssb_sprom { struct ssb_boardinfo { u16 vendor; u16 type; - u16 rev; + u8 rev; }; diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 25310f1d7f3..115b570e3bf 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -14,8 +14,8 @@ struct stack_trace { }; extern void save_stack_trace(struct stack_trace *trace); -extern void save_stack_trace_regs(struct stack_trace *trace, - struct pt_regs *regs); +extern void save_stack_trace_regs(struct pt_regs *regs, + struct stack_trace *trace); extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 9529e49b038..0dddc9e42b6 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -28,11 +28,21 @@ #include <linux/platform_device.h> -/* platform data for platform device structure's platform_data field */ +/* Platfrom data for platform device structure's platform_data field */ + +struct stmmac_mdio_bus_data { + int bus_id; + int (*phy_reset)(void *priv); + unsigned int phy_mask; + int *irqs; + int probed_phy_irq; +}; -/* Private data for the STM on-board ethernet driver */ struct plat_stmmacenet_data { int bus_id; + int phy_addr; + int interface; + struct stmmac_mdio_bus_data *mdio_bus_data; int pbl; int clk_csr; int has_gmac; @@ -40,6 +50,7 @@ struct plat_stmmacenet_data { int tx_coe; int bugged_jumbo; int pmt; + int force_sf_dma_mode; void (*fix_mac_speed)(void *priv, unsigned int speed); void (*bus_setup)(void __iomem *ioaddr); int (*init)(struct platform_device *pdev); @@ -47,14 +58,4 @@ struct plat_stmmacenet_data { void *custom_cfg; void *bsp_priv; }; - -struct plat_stmmacphy_data { - int bus_id; - int phy_addr; - unsigned int phy_mask; - int interface; - int (*phy_reset)(void *priv); - void *priv; -}; #endif - diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 092dc9b1ce7..4a9d0c7edc6 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -124,15 +124,19 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); */ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); +int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, + const struct cpumask *cpus); + #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ static inline int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) { + unsigned long flags; int ret; - local_irq_disable(); + local_irq_save(flags); ret = fn(data); - local_irq_enable(); + local_irq_restore(flags); return ret; } @@ -142,5 +146,11 @@ static inline int stop_machine(int (*fn)(void *), void *data, return __stop_machine(fn, data, cpus); } +static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, + const struct cpumask *cpus) +{ + return __stop_machine(fn, data, cpus); +} + #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ #endif /* _LINUX_STOP_MACHINE */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 083ffea7ba1..e1e3742733b 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -92,6 +92,13 @@ typedef int __bitwise suspend_state_t; * @enter() and @wake(), even if any of them fails. 
It is executed after * a failing @prepare. * + * @suspend_again: Returns whether the system should suspend again (true) or + * not (false). If the platform wants to poll sensors or execute some + * code during suspended without invoking userspace and most of devices, + * suspend_again callback is the place assuming that periodic-wakeup or + * alarm-wakeup is already setup. This allows to execute some codes while + * being kept suspended in the view of userland and devices. + * * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or * the transition to the sleep state has been aborted. @@ -113,6 +120,7 @@ struct platform_suspend_ops { int (*enter)(suspend_state_t state); void (*wake)(void); void (*finish)(void); + bool (*suspend_again)(void); void (*end)(void); void (*recover)(void); }; diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index 7138962664f..b11f8ce2d3c 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h @@ -67,7 +67,7 @@ enum { TCF_META_ID_SK_FORWARD_ALLOCS, TCF_META_ID_SK_SNDBUF, TCF_META_ID_SK_ALLOCS, - TCF_META_ID_SK_ROUTE_CAPS, + __TCF_META_ID_SK_ROUTE_CAPS, /* unimplemented but in ABI already */ TCF_META_ID_SK_HASH, TCF_META_ID_SK_LINGERTIME, TCF_META_ID_SK_ACK_BACKLOG, diff --git a/include/linux/tcp.h b/include/linux/tcp.h index e64f4c67d0e..531ede8006d 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -282,6 +282,7 @@ struct tcp_request_sock { #endif u32 rcv_isn; u32 snt_isn; + u32 snt_synack; /* synack sent time */ }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index e95f5236611..a71a2927a6a 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -51,27 +51,12 @@ #include <linux/security.h> struct linux_binprm; -/** - * tracehook_expect_breakpoints - guess if task memory might be touched - * @task: current task, making a new mapping - * - * Return nonzero if @task is expected to want breakpoint insertion in - * its memory at some point. A zero return is no guarantee it won't - * be done, but this is a hint that it's known to be likely. - * - * May be called with @task->mm->mmap_sem held for writing. - */ -static inline int tracehook_expect_breakpoints(struct task_struct *task) -{ - return (task_ptrace(task) & PT_PTRACED) != 0; -} - /* * ptrace report for syscall entry and exit looks identical. */ static inline void ptrace_report_syscall(struct pt_regs *regs) { - int ptrace = task_ptrace(current); + int ptrace = current->ptrace; if (!(ptrace & PT_PTRACED)) return; @@ -145,229 +130,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) } /** - * tracehook_unsafe_exec - check for exec declared unsafe due to tracing - * @task: current task doing exec - * - * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. - * - * @task->signal->cred_guard_mutex is held by the caller through the do_execve(). 
- */ -static inline int tracehook_unsafe_exec(struct task_struct *task) -{ - int unsafe = 0; - int ptrace = task_ptrace(task); - if (ptrace & PT_PTRACED) { - if (ptrace & PT_PTRACE_CAP) - unsafe |= LSM_UNSAFE_PTRACE_CAP; - else - unsafe |= LSM_UNSAFE_PTRACE; - } - return unsafe; -} - -/** - * tracehook_tracer_task - return the task that is tracing the given task - * @tsk: task to consider - * - * Returns NULL if no one is tracing @task, or the &struct task_struct - * pointer to its tracer. - * - * Must called under rcu_read_lock(). The pointer returned might be kept - * live only by RCU. During exec, this may be called with task_lock() - * held on @task, still held from when tracehook_unsafe_exec() was called. - */ -static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) -{ - if (task_ptrace(tsk) & PT_PTRACED) - return rcu_dereference(tsk->parent); - return NULL; -} - -/** - * tracehook_report_exec - a successful exec was completed - * @fmt: &struct linux_binfmt that performed the exec - * @bprm: &struct linux_binprm containing exec details - * @regs: user-mode register state - * - * An exec just completed, we are shortly going to return to user mode. - * The freshly initialized register state can be seen and changed in @regs. - * The name, file and other pointers in @bprm are still on hand to be - * inspected, but will be freed as soon as this returns. - * - * Called with no locks, but with some kernel resources held live - * and a reference on @fmt->module. - */ -static inline void tracehook_report_exec(struct linux_binfmt *fmt, - struct linux_binprm *bprm, - struct pt_regs *regs) -{ - if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) && - unlikely(task_ptrace(current) & PT_PTRACED)) - send_sig(SIGTRAP, current, 0); -} - -/** - * tracehook_report_exit - task has begun to exit - * @exit_code: pointer to value destined for @current->exit_code - * - * @exit_code points to the value passed to do_exit(), which tracing - * might change here. This is almost the first thing in do_exit(), - * before freeing any resources or setting the %PF_EXITING flag. - * - * Called with no locks held. - */ -static inline void tracehook_report_exit(long *exit_code) -{ - ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code); -} - -/** - * tracehook_prepare_clone - prepare for new child to be cloned - * @clone_flags: %CLONE_* flags from clone/fork/vfork system call - * - * This is called before a new user task is to be cloned. - * Its return value will be passed to tracehook_finish_clone(). - * - * Called with no locks held. - */ -static inline int tracehook_prepare_clone(unsigned clone_flags) -{ - if (clone_flags & CLONE_UNTRACED) - return 0; - - if (clone_flags & CLONE_VFORK) { - if (current->ptrace & PT_TRACE_VFORK) - return PTRACE_EVENT_VFORK; - } else if ((clone_flags & CSIGNAL) != SIGCHLD) { - if (current->ptrace & PT_TRACE_CLONE) - return PTRACE_EVENT_CLONE; - } else if (current->ptrace & PT_TRACE_FORK) - return PTRACE_EVENT_FORK; - - return 0; -} - -/** - * tracehook_finish_clone - new child created and being attached - * @child: new child task - * @clone_flags: %CLONE_* flags from clone/fork/vfork system call - * @trace: return value from tracehook_prepare_clone() - * - * This is called immediately after adding @child to its parent's children list. - * The @trace value is that returned by tracehook_prepare_clone(). - * - * Called with current's siglock and write_lock_irq(&tasklist_lock) held. 
- */ -static inline void tracehook_finish_clone(struct task_struct *child, - unsigned long clone_flags, int trace) -{ - ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); -} - -/** - * tracehook_report_clone - in parent, new child is about to start running - * @regs: parent's user register state - * @clone_flags: flags from parent's system call - * @pid: new child's PID in the parent's namespace - * @child: new child task - * - * Called after a child is set up, but before it has been started running. - * This is not a good place to block, because the child has not started - * yet. Suspend the child here if desired, and then block in - * tracehook_report_clone_complete(). This must prevent the child from - * self-reaping if tracehook_report_clone_complete() uses the @child - * pointer; otherwise it might have died and been released by the time - * tracehook_report_clone_complete() is called. - * - * Called with no locks held, but the child cannot run until this returns. - */ -static inline void tracehook_report_clone(struct pt_regs *regs, - unsigned long clone_flags, - pid_t pid, struct task_struct *child) -{ - if (unlikely(task_ptrace(child))) { - /* - * It doesn't matter who attached/attaching to this - * task, the pending SIGSTOP is right in any case. - */ - sigaddset(&child->pending.signal, SIGSTOP); - set_tsk_thread_flag(child, TIF_SIGPENDING); - } -} - -/** - * tracehook_report_clone_complete - new child is running - * @trace: return value from tracehook_prepare_clone() - * @regs: parent's user register state - * @clone_flags: flags from parent's system call - * @pid: new child's PID in the parent's namespace - * @child: child task, already running - * - * This is called just after the child has started running. This is - * just before the clone/fork syscall returns, or blocks for vfork - * child completion if @clone_flags has the %CLONE_VFORK bit set. - * The @child pointer may be invalid if a self-reaping child died and - * tracehook_report_clone() took no action to prevent it from self-reaping. - * - * Called with no locks held. - */ -static inline void tracehook_report_clone_complete(int trace, - struct pt_regs *regs, - unsigned long clone_flags, - pid_t pid, - struct task_struct *child) -{ - if (unlikely(trace)) - ptrace_event(0, trace, pid); -} - -/** - * tracehook_report_vfork_done - vfork parent's child has exited or exec'd - * @child: child task, already running - * @pid: new child's PID in the parent's namespace - * - * Called after a %CLONE_VFORK parent has waited for the child to complete. - * The clone/vfork system call will return immediately after this. - * The @child pointer may be invalid if a self-reaping child died and - * tracehook_report_clone() took no action to prevent it from self-reaping. - * - * Called with no locks held. - */ -static inline void tracehook_report_vfork_done(struct task_struct *child, - pid_t pid) -{ - ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid); -} - -/** - * tracehook_prepare_release_task - task is being reaped, clean up tracing - * @task: task in %EXIT_DEAD state - * - * This is called in release_task() just before @task gets finally reaped - * and freed. This would be the ideal place to remove and clean up any - * tracing-related state for @task. - * - * Called with no locks held. 
- */ -static inline void tracehook_prepare_release_task(struct task_struct *task) -{ -} - -/** - * tracehook_finish_release_task - final tracing clean-up - * @task: task in %EXIT_DEAD state - * - * This is called in release_task() when @task is being in the middle of - * being reaped. After this, there must be no tracing entanglements. - * - * Called with write_lock_irq(&tasklist_lock) held. - */ -static inline void tracehook_finish_release_task(struct task_struct *task) -{ - ptrace_release_task(task); -} - -/** * tracehook_signal_handler - signal handler setup is complete * @sig: number of signal being delivered * @info: siginfo_t of signal being delivered @@ -390,151 +152,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info, ptrace_notify(SIGTRAP); } -/** - * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal - * @task: task receiving the signal - * @sig: signal number being sent - * - * Return zero iff tracing doesn't care to examine this ignored signal, - * so it can short-circuit normal delivery and never even get queued. - * - * Called with @task->sighand->siglock held. - */ -static inline int tracehook_consider_ignored_signal(struct task_struct *task, - int sig) -{ - return (task_ptrace(task) & PT_PTRACED) != 0; -} - -/** - * tracehook_consider_fatal_signal - suppress special handling of fatal signal - * @task: task receiving the signal - * @sig: signal number being sent - * - * Return nonzero to prevent special handling of this termination signal. - * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is - * ignored, in which case force_sig() is about to reset it to %SIG_DFL. - * When this returns zero, this signal might cause a quick termination - * that does not give the debugger a chance to intercept the signal. - * - * Called with or without @task->sighand->siglock held. - */ -static inline int tracehook_consider_fatal_signal(struct task_struct *task, - int sig) -{ - return (task_ptrace(task) & PT_PTRACED) != 0; -} - -/** - * tracehook_force_sigpending - let tracing force signal_pending(current) on - * - * Called when recomputing our signal_pending() flag. Return nonzero - * to force the signal_pending() flag on, so that tracehook_get_signal() - * will be called before the next return to user mode. - * - * Called with @current->sighand->siglock held. - */ -static inline int tracehook_force_sigpending(void) -{ - return 0; -} - -/** - * tracehook_get_signal - deliver synthetic signal to traced task - * @task: @current - * @regs: task_pt_regs(@current) - * @info: details of synthetic signal - * @return_ka: sigaction for synthetic signal - * - * Return zero to check for a real pending signal normally. - * Return -1 after releasing the siglock to repeat the check. - * Return a signal number to induce an artificial signal delivery, - * setting *@info and *@return_ka to specify its details and behavior. - * - * The @return_ka->sa_handler value controls the disposition of the - * signal, no matter the signal number. For %SIG_DFL, the return value - * is a representative signal to indicate the behavior (e.g. %SIGTERM - * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop, - * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number - * reported will be @info->si_signo instead. - * - * Called with @task->sighand->siglock held, before dequeuing pending signals. 
- */ -static inline int tracehook_get_signal(struct task_struct *task, - struct pt_regs *regs, - siginfo_t *info, - struct k_sigaction *return_ka) -{ - return 0; -} - -/** - * tracehook_finish_jctl - report about return from job control stop - * - * This is called by do_signal_stop() after wakeup. - */ -static inline void tracehook_finish_jctl(void) -{ -} - -#define DEATH_REAP -1 -#define DEATH_DELAYED_GROUP_LEADER -2 - -/** - * tracehook_notify_death - task is dead, ready to notify parent - * @task: @current task now exiting - * @death_cookie: value to pass to tracehook_report_death() - * @group_dead: nonzero if this was the last thread in the group to die - * - * A return value >= 0 means call do_notify_parent() with that signal - * number. Negative return value can be %DEATH_REAP to self-reap right - * now, or %DEATH_DELAYED_GROUP_LEADER to a zombie without notifying our - * parent. Note that a return value of 0 means a do_notify_parent() call - * that sends no signal, but still wakes up a parent blocked in wait*(). - * - * Called with write_lock_irq(&tasklist_lock) held. - */ -static inline int tracehook_notify_death(struct task_struct *task, - void **death_cookie, int group_dead) -{ - if (task_detached(task)) - return task->ptrace ? SIGCHLD : DEATH_REAP; - - /* - * If something other than our normal parent is ptracing us, then - * send it a SIGCHLD instead of honoring exit_signal. exit_signal - * only has special meaning to our real parent. - */ - if (thread_group_empty(task) && !ptrace_reparented(task)) - return task->exit_signal; - - return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; -} - -/** - * tracehook_report_death - task is dead and ready to be reaped - * @task: @current task now exiting - * @signal: return value from tracheook_notify_death() - * @death_cookie: value passed back from tracehook_notify_death() - * @group_dead: nonzero if this was the last thread in the group to die - * - * Thread has just become a zombie or is about to self-reap. If positive, - * @signal is the signal number just sent to the parent (usually %SIGCHLD). - * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is - * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. - * The @death_cookie was passed back by tracehook_notify_death(). - * - * If normal reaping is not inhibited, @task->exit_state might be changing - * in parallel. - * - * Called without locks. - */ -static inline void tracehook_report_death(struct task_struct *task, - int signal, void *death_cookie, - int group_dead) -{ -} - #ifdef TIF_NOTIFY_RESUME /** * set_notify_resume - cause tracehook_notify_resume() to be called diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 136040bba3e..970d5a2a904 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -63,6 +63,7 @@ struct virtio_net_config { * specify GSO or CSUM features, you can simply ignore the header. 
*/ struct virtio_net_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset +#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid __u8 flags; #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame #define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index f584aba78ca..2be2887c695 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -255,7 +255,7 @@ enum { WQ_HIGHPRI = 1 << 4, /* high priority */ WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ - WQ_DYING = 1 << 6, /* internal: workqueue is dying */ + WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */ WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ @@ -355,6 +355,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay); extern void flush_workqueue(struct workqueue_struct *wq); +extern void drain_workqueue(struct workqueue_struct *wq); extern void flush_scheduled_work(void); extern int schedule_work(struct work_struct *work); @@ -412,21 +413,6 @@ static inline bool __cancel_delayed_work(struct delayed_work *work) return ret; } -/* Obsolete. use cancel_delayed_work_sync() */ -static inline __deprecated -void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, - struct delayed_work *work) -{ - cancel_delayed_work_sync(work); -} - -/* Obsolete. use cancel_delayed_work_sync() */ -static inline __deprecated -void cancel_rearming_delayed_work(struct delayed_work *work) -{ - cancel_delayed_work_sync(work); -} - #ifndef CONFIG_SMP static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 008711e8e78..342dcf13d03 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -40,6 +40,7 @@ * @P9_DEBUG_FID: fid allocation/deallocation tracking * @P9_DEBUG_PKT: packet marshalling/unmarshalling * @P9_DEBUG_FSC: FS-cache tracing + * @P9_DEBUG_VPKT: Verbose packet debugging (full packet dump) * * These flags are passed at mount time to turn on various levels of * verbosity and tracing which will be output to the system logs. @@ -57,6 +58,7 @@ enum p9_debug_flags { P9_DEBUG_FID = (1<<9), P9_DEBUG_PKT = (1<<10), P9_DEBUG_FSC = (1<<11), + P9_DEBUG_VPKT = (1<<12), }; #ifdef CONFIG_NET_9P_DEBUG @@ -74,10 +76,14 @@ do { \ } \ } while (0) +#define P9_DUMP_PKT(way, pdu) p9pdu_dump(way, pdu) + #else #define P9_DPRINTK(level, format, arg...) do { } while (0) +#define P9_DUMP_PKT(way, pdu) do { } while (0) #endif + #define P9_EPRINTK(level, format, arg...) \ do { \ printk(level "9p: %s (%d): " \ @@ -175,6 +181,10 @@ enum p9_msg_t { P9_RLINK, P9_TMKDIR = 72, P9_RMKDIR, + P9_TRENAMEAT = 74, + P9_RRENAMEAT, + P9_TUNLINKAT = 76, + P9_RUNLINKAT, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, @@ -321,21 +331,6 @@ enum p9_qid_t { #define P9_READDIRHDRSZ 24 /** - * struct p9_str - length prefixed string type - * @len: length of the string - * @str: the string - * - * The protocol uses length prefixed strings for all - * string data, so we replicate that for our internal - * string members. 
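The workqueue.h hunk above adds drain_workqueue(), which repeatedly flushes a workqueue until it is empty, covering work items that re-queue themselves. A minimal teardown sketch; the "mydrv" structure and its stopping flag are assumptions of the example, not part of the patch.

	#include <linux/workqueue.h>

	struct mydrv {
		struct workqueue_struct *wq;
		bool stopping;		/* work functions check this before re-queueing */
	};

	static void mydrv_shutdown(struct mydrv *drv)
	{
		drv->stopping = true;		/* stop generating new work */
		drain_workqueue(drv->wq);	/* wait for queued and re-queued work */
		destroy_workqueue(drv->wq);
	}

Unlike a single flush_workqueue() call, draining keeps going while work on the queue schedules more work, which is also why the internal WQ_DYING flag is renamed to the more accurate WQ_DRAINING in the same hunk.
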
- */ - -struct p9_str { - u16 len; - char *str; -}; - -/** * struct p9_qid - file system entity information * @type: 8-bit type &p9_qid_t * @version: 16-bit monotonically incrementing version number @@ -371,11 +366,11 @@ struct p9_qid { * @atime: Last access/read time * @mtime: Last modify/write time * @length: file length - * @name: last element of path (aka filename) in type &p9_str - * @uid: owner name in type &p9_str - * @gid: group owner in type &p9_str - * @muid: last modifier in type &p9_str - * @extension: area used to encode extended UNIX support in type &p9_str + * @name: last element of path (aka filename) + * @uid: owner name + * @gid: group owner + * @muid: last modifier + * @extension: area used to encode extended UNIX support * @n_uid: numeric user id of owner (part of 9p2000.u extension) * @n_gid: numeric group id (part of 9p2000.u extension) * @n_muid: numeric user id of laster modifier (part of 9p2000.u extension) @@ -512,11 +507,6 @@ struct p9_getlock { char *client_id; }; -/* Structures for Protocol Operations */ -struct p9_tstatfs { - u32 fid; -}; - struct p9_rstatfs { u32 type; u32 bsize; @@ -529,159 +519,6 @@ struct p9_rstatfs { u32 namelen; }; -struct p9_trename { - u32 fid; - u32 newdirfid; - struct p9_str name; -}; - -struct p9_rrename { -}; - -struct p9_tversion { - u32 msize; - struct p9_str version; -}; - -struct p9_rversion { - u32 msize; - struct p9_str version; -}; - -struct p9_tauth { - u32 afid; - struct p9_str uname; - struct p9_str aname; - u32 n_uname; /* 9P2000.u extensions */ -}; - -struct p9_rauth { - struct p9_qid qid; -}; - -struct p9_rerror { - struct p9_str error; - u32 errno; /* 9p2000.u extension */ -}; - -struct p9_tflush { - u16 oldtag; -}; - -struct p9_rflush { -}; - -struct p9_tattach { - u32 fid; - u32 afid; - struct p9_str uname; - struct p9_str aname; - u32 n_uname; /* 9P2000.u extensions */ -}; - -struct p9_rattach { - struct p9_qid qid; -}; - -struct p9_twalk { - u32 fid; - u32 newfid; - u16 nwname; - struct p9_str wnames[16]; -}; - -struct p9_rwalk { - u16 nwqid; - struct p9_qid wqids[16]; -}; - -struct p9_topen { - u32 fid; - u8 mode; -}; - -struct p9_ropen { - struct p9_qid qid; - u32 iounit; -}; - -struct p9_tcreate { - u32 fid; - struct p9_str name; - u32 perm; - u8 mode; - struct p9_str extension; -}; - -struct p9_rcreate { - struct p9_qid qid; - u32 iounit; -}; - -struct p9_tread { - u32 fid; - u64 offset; - u32 count; -}; - -struct p9_rread { - u32 count; - u8 *data; -}; - -struct p9_twrite { - u32 fid; - u64 offset; - u32 count; - u8 *data; -}; - -struct p9_rwrite { - u32 count; -}; - -struct p9_treaddir { - u32 fid; - u64 offset; - u32 count; -}; - -struct p9_rreaddir { - u32 count; - u8 *data; -}; - - -struct p9_tclunk { - u32 fid; -}; - -struct p9_rclunk { -}; - -struct p9_tremove { - u32 fid; -}; - -struct p9_rremove { -}; - -struct p9_tstat { - u32 fid; -}; - -struct p9_rstat { - struct p9_wstat stat; -}; - -struct p9_twstat { - u32 fid; - struct p9_wstat stat; -}; - -struct p9_rwstat { -}; - /** * struct p9_fcall - primary packet structure * @size: prefixed length of the structure diff --git a/include/net/9p/client.h b/include/net/9p/client.h index d26d5e98a17..55ce72ce986 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -36,9 +36,9 @@ */ enum p9_proto_versions{ - p9_proto_legacy = 0, - p9_proto_2000u = 1, - p9_proto_2000L = 2, + p9_proto_legacy, + p9_proto_2000u, + p9_proto_2000L, }; @@ -211,7 +211,10 @@ struct p9_dirent { }; int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); -int 
p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name); +int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, + const char *name); +int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, + struct p9_fid *newdirfid, const char *new_name); struct p9_client *p9_client_create(const char *dev_name, char *options); void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); @@ -231,6 +234,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); int p9_client_remove(struct p9_fid *fid); +int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags); int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, u32 count); int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index d8549fb9c74..83531ebeee9 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -67,7 +67,7 @@ struct p9_trans_module { void v9fs_register_trans(struct p9_trans_module *m); void v9fs_unregister_trans(struct p9_trans_module *m); -struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name); +struct p9_trans_module *v9fs_get_trans_by_name(char *s); struct p9_trans_module *v9fs_get_default_trans(void); void v9fs_put_trans(struct p9_trans_module *m); #endif /* NET_9P_TRANSPORT_H */ diff --git a/include/net/act_api.h b/include/net/act_api.h index bab385f13ac..c739531e156 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -72,7 +72,7 @@ struct tcf_act_hdr { struct tc_action { void *priv; - struct tc_action_ops *ops; + const struct tc_action_ops *ops; __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ __u32 order; struct tc_action *next; @@ -86,7 +86,7 @@ struct tc_action_ops { __u32 type; /* TBD to match kind */ __u32 capab; /* capabilities includes 4 bit version */ struct module *owner; - int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *); + int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *); int (*get_stats)(struct sk_buff *, struct tc_action *); int (*dump)(struct sk_buff *, struct tc_action *, int, int); int (*cleanup)(struct tc_action *, int bind); @@ -115,7 +115,7 @@ extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo); extern int tcf_register_action(struct tc_action_ops *a); extern int tcf_unregister_action(struct tc_action_ops *a); extern void tcf_action_destroy(struct tc_action *a, int bind); -extern int tcf_action_exec(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res); +extern int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res); extern struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind); extern struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est, char *n, int ovr, int bind); extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/include/net/arp.h b/include/net/arp.h index 91f0568a04e..4979af8b155 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -8,6 +8,36 @@ extern struct neigh_table arp_tbl; +static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd) +{ + u32 val = key ^ dev->ifindex; + + return val * hash_rnd; +} + +static inline struct neighbour 
*__ipv4_neigh_lookup(struct neigh_table *tbl, struct net_device *dev, u32 key) +{ + struct neigh_hash_table *nht; + struct neighbour *n; + u32 hash_val; + + rcu_read_lock_bh(); + nht = rcu_dereference_bh(tbl->nht); + hash_val = arp_hashfn(key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); + for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); + n != NULL; + n = rcu_dereference_bh(n->next)) { + if (n->dev == dev && *(u32 *)n->primary_key == key) { + if (!atomic_inc_not_zero(&n->refcnt)) + n = NULL; + break; + } + } + rcu_read_unlock_bh(); + + return n; +} + extern void arp_init(void); extern int arp_find(unsigned char *haddr, struct sk_buff *skb); extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg); @@ -15,7 +45,6 @@ extern void arp_send(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, const unsigned char *dest_hw, const unsigned char *src_hw, const unsigned char *th); -extern int arp_bind_neighbour(struct dst_entry *dst); extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); extern void arp_ifdown(struct net_device *dev); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 43750439c52..e727555d4ee 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -56,6 +56,7 @@ #define BT_SECURITY 4 struct bt_security { __u8 level; + __u8 key_size; }; #define BT_SECURITY_SDP 0 #define BT_SECURITY_LOW 1 @@ -69,9 +70,19 @@ struct bt_security { #define BT_FLUSHABLE_OFF 0 #define BT_FLUSHABLE_ON 1 -#define BT_INFO(fmt, arg...) printk(KERN_INFO "Bluetooth: " fmt "\n" , ## arg) -#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg) -#define BT_DBG(fmt, arg...) pr_debug("%s: " fmt "\n" , __func__ , ## arg) +#define BT_POWER 9 +struct bt_power { + __u8 force_active; +}; +#define BT_POWER_FORCE_ACTIVE_OFF 0 +#define BT_POWER_FORCE_ACTIVE_ON 1 + +__attribute__((format (printf, 2, 3))) +int bt_printk(const char *level, const char *fmt, ...); + +#define BT_INFO(fmt, arg...) bt_printk(KERN_INFO, pr_fmt(fmt), ##arg) +#define BT_ERR(fmt, arg...) bt_printk(KERN_ERR, pr_fmt(fmt), ##arg) +#define BT_DBG(fmt, arg...) 
pr_debug(fmt "\n", ##arg) /* Connection and socket states */ enum { @@ -130,7 +141,8 @@ int bt_sock_register(int proto, const struct net_proto_family *ops); int bt_sock_unregister(int proto); void bt_sock_link(struct bt_sock_list *l, struct sock *s); void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); -int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); +int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); @@ -150,6 +162,7 @@ struct bt_skb_cb { __u8 retries; __u8 sar; unsigned short channel; + __u8 force_active; }; #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb)) @@ -164,8 +177,8 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) return skb; } -static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long len, - int nb, int *err) +static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, + unsigned long len, int nb, int *err) { struct sk_buff *skb; @@ -195,7 +208,7 @@ out: return NULL; } -int bt_err(__u16 code); +int bt_to_errno(__u16 code); extern int hci_sock_init(void); extern void hci_sock_cleanup(void); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 0c20227e57f..be30aabe7b8 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -211,11 +211,16 @@ enum { #define LMP_EDR_3S_ESCO 0x80 #define LMP_EXT_INQ 0x01 +#define LMP_SIMUL_LE_BR 0x02 #define LMP_SIMPLE_PAIR 0x08 #define LMP_NO_FLUSH 0x40 #define LMP_LSTO 0x01 #define LMP_INQ_TX_PWR 0x02 +#define LMP_EXTFEATURES 0x80 + +/* Extended LMP features */ +#define LMP_HOST_LE 0x02 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -254,6 +259,10 @@ enum { #define HCI_LK_UNAUTH_COMBINATION 0x04 #define HCI_LK_AUTH_COMBINATION 0x05 #define HCI_LK_CHANGED_COMBINATION 0x06 +/* The spec doesn't define types for SMP keys */ +#define HCI_LK_SMP_LTK 0x81 +#define HCI_LK_SMP_IRK 0x82 +#define HCI_LK_SMP_CSRK 0x83 /* ----- HCI Commands ---- */ #define HCI_OP_NOP 0x0000 @@ -653,6 +662,12 @@ struct hci_rp_read_local_oob_data { #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 +#define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d +struct hci_cp_write_le_host_supported { + __u8 le; + __u8 simul; +} __packed; + #define HCI_OP_READ_LOCAL_VERSION 0x1001 struct hci_rp_read_local_version { __u8 status; @@ -676,6 +691,9 @@ struct hci_rp_read_local_features { } __packed; #define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004 +struct hci_cp_read_local_ext_features { + __u8 page; +} __packed; struct hci_rp_read_local_ext_features { __u8 status; __u8 page; @@ -710,6 +728,12 @@ struct hci_rp_le_read_buffer_size { __u8 le_max_pkt; } __packed; +#define HCI_OP_LE_SET_SCAN_ENABLE 0x200c +struct hci_cp_le_set_scan_enable { + __u8 enable; + __u8 filter_dup; +} __packed; + #define HCI_OP_LE_CREATE_CONN 0x200d struct hci_cp_le_create_conn { __le16 scan_interval; @@ -739,6 +763,33 @@ struct hci_cp_le_conn_update { __le16 max_ce_len; } __packed; +#define HCI_OP_LE_START_ENC 0x2019 +struct hci_cp_le_start_enc { + __le16 handle; + __u8 rand[8]; + __le16 ediv; + __u8 ltk[16]; +} __packed; + +#define HCI_OP_LE_LTK_REPLY 0x201a +struct hci_cp_le_ltk_reply { + __le16 handle; + __u8 ltk[16]; +} __packed; +struct hci_rp_le_ltk_reply { + __u8 status; + __le16 handle; +} __packed; + +#define 
HCI_OP_LE_LTK_NEG_REPLY 0x201b +struct hci_cp_le_ltk_neg_reply { + __le16 handle; +} __packed; +struct hci_rp_le_ltk_neg_reply { + __u8 status; + __le16 handle; +} __packed; + /* ---- HCI Events ---- */ #define HCI_EV_INQUIRY_COMPLETE 0x01 @@ -1029,6 +1080,32 @@ struct hci_ev_le_conn_complete { __u8 clk_accurancy; } __packed; +#define HCI_EV_LE_LTK_REQ 0x05 +struct hci_ev_le_ltk_req { + __le16 handle; + __u8 random[8]; + __le16 ediv; +} __packed; + +/* Advertising report event types */ +#define ADV_IND 0x00 +#define ADV_DIRECT_IND 0x01 +#define ADV_SCAN_IND 0x02 +#define ADV_NONCONN_IND 0x03 +#define ADV_SCAN_RSP 0x04 + +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 + +#define HCI_EV_LE_ADVERTISING_REPORT 0x02 +struct hci_ev_le_advertising_info { + __u8 evt_type; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 length; + __u8 data[0]; +} __packed; + /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6c994c004d1..8f441b8b296 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include <linux/interrupt.h> #include <net/bluetooth/hci.h> /* HCI upper protocols */ @@ -74,12 +75,28 @@ struct bt_uuid { u8 svc_hint; }; +struct key_master_id { + __le16 ediv; + u8 rand[8]; +} __packed; + +struct link_key_data { + bdaddr_t bdaddr; + u8 type; + u8 val[16]; + u8 pin_len; + u8 dlen; + u8 data[0]; +} __packed; + struct link_key { struct list_head list; bdaddr_t bdaddr; u8 type; u8 val[16]; u8 pin_len; + u8 dlen; + u8 data[0]; }; struct oob_data { @@ -89,6 +106,12 @@ struct oob_data { u8 randomizer[16]; }; +struct adv_entry { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; +}; + #define NUM_REASSEMBLY 4 struct hci_dev { struct list_head list; @@ -107,6 +130,7 @@ struct hci_dev { __u8 major_class; __u8 minor_class; __u8 features[8]; + __u8 extfeatures[8]; __u8 commands[64]; __u8 ssp_mode; __u8 hci_ver; @@ -171,6 +195,8 @@ struct hci_dev { __u16 init_last_cmd; + struct crypto_blkcipher *tfm; + struct inquiry_cache inq_cache; struct hci_conn_hash conn_hash; struct list_head blacklist; @@ -181,6 +207,9 @@ struct hci_dev { struct list_head remote_oob_data; + struct list_head adv_entries; + struct timer_list adv_timer; + struct hci_dev_stats stat; struct sk_buff_head driver_init; @@ -212,9 +241,9 @@ struct hci_conn { struct list_head list; atomic_t refcnt; - spinlock_t lock; bdaddr_t dst; + __u8 dst_type; __u16 handle; __u16 state; __u8 mode; @@ -233,6 +262,7 @@ struct hci_conn { __u8 sec_level; __u8 pending_sec_level; __u8 pin_length; + __u8 enc_key_size; __u8 io_capability; __u8 power_save; __u16 disc_timeout; @@ -259,7 +289,6 @@ struct hci_conn { struct hci_dev *hdev; void *l2cap_data; void *sco_data; - void *priv; struct hci_conn *link; @@ -307,12 +336,14 @@ static inline long inquiry_entry_age(struct inquiry_entry *e) return jiffies - e->timestamp; } -struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, + bdaddr_t *bdaddr); void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data); /* ----- HCI Connections ----- */ enum { HCI_CONN_AUTH_PEND, + HCI_CONN_REAUTH_PEND, HCI_CONN_ENCRYPT_PEND, HCI_CONN_RSWITCH_PEND, HCI_CONN_MODE_CHANGE_PEND, @@ -420,14 +451,15 @@ int hci_conn_del(struct hci_conn *conn); void 
hci_conn_hash_flush(struct hci_dev *hdev); void hci_conn_check_pending(struct hci_dev *hdev); -struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type); +struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u8 sec_level, __u8 auth_type); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type); int hci_conn_change_link_key(struct hci_conn *conn); int hci_conn_switch_role(struct hci_conn *conn, __u8 role); -void hci_conn_enter_active_mode(struct hci_conn *conn); +void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); void hci_conn_enter_sniff_mode(struct hci_conn *conn); void hci_conn_hold_device(struct hci_conn *conn); @@ -449,10 +481,12 @@ static inline void hci_conn_put(struct hci_conn *conn) timeo = msecs_to_jiffies(conn->disc_timeout); if (!conn->out) timeo *= 2; - } else + } else { timeo = msecs_to_jiffies(10); - } else + } + } else { timeo = msecs_to_jiffies(10); + } mod_timer(&conn->disc_timer, jiffies + timeo); } } @@ -511,6 +545,8 @@ int hci_inquiry(void __user *arg); struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); int hci_blacklist_clear(struct hci_dev *hdev); +int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr); +int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr); int hci_uuids_clear(struct hci_dev *hdev); @@ -518,6 +554,11 @@ int hci_link_keys_clear(struct hci_dev *hdev); struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len); +struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]); +struct link_key *hci_find_link_key_type(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type); +int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, + u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16]); int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); int hci_remote_oob_data_clear(struct hci_dev *hdev); @@ -527,6 +568,12 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, u8 *randomizer); int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr); +#define ADV_CLEAR_TIMEOUT (3*60*HZ) /* Three minutes */ +int hci_adv_entries_clear(struct hci_dev *hdev); +struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr); +int hci_add_adv_entry(struct hci_dev *hdev, + struct hci_ev_le_advertising_info *ev); + void hci_del_off_timer(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); @@ -553,6 +600,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_no_flush_capable(dev) ((dev)->features[6] & LMP_NO_FLUSH) #define lmp_le_capable(dev) ((dev)->features[4] & LMP_LE) +/* ----- Extended LMP capabilities ----- */ +#define lmp_host_le_capable(dev) ((dev)->extfeatures[0] & LMP_HOST_LE) + /* ----- HCI protocols ----- */ struct hci_proto { char *name; @@ -561,16 +611,20 @@ struct hci_proto { void *priv; - int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type); + int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 type); int (*connect_cfm) (struct hci_conn *conn, __u8 status); int (*disconn_ind) (struct hci_conn *conn); int (*disconn_cfm) (struct hci_conn *conn, __u8 reason); - int (*recv_acldata) 
(struct hci_conn *conn, struct sk_buff *skb, __u16 flags); + int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, + __u16 flags); int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb); - int (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); + int (*security_cfm) (struct hci_conn *conn, __u8 status, + __u8 encrypt); }; -static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) +static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 type) { register struct hci_proto *hp; int mask = 0; @@ -656,7 +710,8 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) conn->security_cfm_cb(conn, status); } -static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) +static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, + __u8 encrypt) { register struct hci_proto *hp; @@ -681,7 +736,8 @@ struct hci_cb { char *name; - void (*security_cfm) (struct hci_conn *conn, __u8 status, __u8 encrypt); + void (*security_cfm) (struct hci_conn *conn, __u8 status, + __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; @@ -707,13 +763,17 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) read_unlock_bh(&hci_cb_list_lock); } -static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, + __u8 encrypt) { struct list_head *p; if (conn->sec_level == BT_SECURITY_SDP) conn->sec_level = BT_SECURITY_LOW; + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + hci_proto_encrypt_cfm(conn, status, encrypt); read_lock_bh(&hci_cb_list_lock); @@ -738,7 +798,8 @@ static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) read_unlock_bh(&hci_cb_list_lock); } -static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) +static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, + __u8 role) { struct list_head *p; @@ -830,4 +891,9 @@ void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result); void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); +void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], + __u8 ltk[16]); +void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16]); +void hci_le_ltk_neg_reply(struct hci_conn *conn); + #endif /* __HCI_CORE_H */ diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index d09c9b1118e..4f34ad25e75 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -37,7 +37,6 @@ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ #define L2CAP_DEFAULT_ACK_TO 200 -#define L2CAP_LOCAL_BUSY_TRIES 12 #define L2CAP_LE_DEFAULT_MTU 23 #define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */ @@ -130,6 +129,12 @@ struct l2cap_conninfo { #define L2CAP_SDU_END 0x8000 #define L2CAP_SDU_CONTINUE 0xC000 +/* L2CAP Command rej. 
reasons */ +#define L2CAP_REJ_NOT_UNDERSTOOD 0x0000 +#define L2CAP_REJ_MTU_EXCEEDED 0x0001 +#define L2CAP_REJ_INVALID_CID 0x0002 + + /* L2CAP structures */ struct l2cap_hdr { __le16 len; @@ -144,8 +149,19 @@ struct l2cap_cmd_hdr { } __packed; #define L2CAP_CMD_HDR_SIZE 4 -struct l2cap_cmd_rej { +struct l2cap_cmd_rej_unk { + __le16 reason; +} __packed; + +struct l2cap_cmd_rej_mtu { + __le16 reason; + __le16 max_mtu; +} __packed; + +struct l2cap_cmd_rej_cid { __le16 reason; + __le16 scid; + __le16 dcid; } __packed; struct l2cap_conn_req { @@ -287,6 +303,10 @@ struct l2cap_chan { struct l2cap_conn *conn; + __u8 state; + + atomic_t refcnt; + __le16 psm; __u16 dcid; __u16 scid; @@ -295,6 +315,7 @@ struct l2cap_chan { __u16 omtu; __u16 flush_to; __u8 mode; + __u8 chan_type; __le16 sport; @@ -302,6 +323,7 @@ struct l2cap_chan { __u8 role_switch; __u8 force_reliable; __u8 flushable; + __u8 force_active; __u8 ident; @@ -318,8 +340,8 @@ struct l2cap_chan { __u16 monitor_timeout; __u16 mps; - __u8 conf_state; - __u16 conn_state; + unsigned long conf_state; + unsigned long conn_state; __u8 next_tx_seq; __u8 expected_ack_seq; @@ -339,18 +361,29 @@ struct l2cap_chan { __u8 remote_max_tx; __u16 remote_mps; + struct timer_list chan_timer; struct timer_list retrans_timer; struct timer_list monitor_timer; struct timer_list ack_timer; struct sk_buff *tx_send_head; struct sk_buff_head tx_q; struct sk_buff_head srej_q; - struct sk_buff_head busy_q; - struct work_struct busy_work; struct list_head srej_l; struct list_head list; struct list_head global_l; + + void *data; + struct l2cap_ops *ops; +}; + +struct l2cap_ops { + char *name; + + struct l2cap_chan *(*new_connection) (void *data); + int (*recv) (void *data, struct sk_buff *skb); + void (*close) (void *data); + void (*state_change) (void *data, int state); }; struct l2cap_conn { @@ -376,6 +409,15 @@ struct l2cap_conn { __u8 disc_reason; + __u8 preq[7]; /* SMP Pairing Request */ + __u8 prsp[7]; /* SMP Pairing Response */ + __u8 prnd[16]; /* SMP Pairing Random */ + __u8 pcnf[16]; /* SMP Pairing Confirm */ + __u8 tk[16]; /* SMP Temporary Key */ + __u8 smp_key_size; + + struct timer_list security_timer; + struct list_head chan_l; rwlock_t chan_lock; }; @@ -384,44 +426,57 @@ struct l2cap_conn { #define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04 #define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08 +#define L2CAP_CHAN_RAW 1 +#define L2CAP_CHAN_CONN_LESS 2 +#define L2CAP_CHAN_CONN_ORIENTED 3 + /* ----- L2CAP socket info ----- */ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) struct l2cap_pinfo { struct bt_sock bt; struct l2cap_chan *chan; + struct sk_buff *rx_busy_skb; }; -#define L2CAP_CONF_REQ_SENT 0x01 -#define L2CAP_CONF_INPUT_DONE 0x02 -#define L2CAP_CONF_OUTPUT_DONE 0x04 -#define L2CAP_CONF_MTU_DONE 0x08 -#define L2CAP_CONF_MODE_DONE 0x10 -#define L2CAP_CONF_CONNECT_PEND 0x20 -#define L2CAP_CONF_NO_FCS_RECV 0x40 -#define L2CAP_CONF_STATE2_DEVICE 0x80 +enum { + CONF_REQ_SENT, + CONF_INPUT_DONE, + CONF_OUTPUT_DONE, + CONF_MTU_DONE, + CONF_MODE_DONE, + CONF_CONNECT_PEND, + CONF_NO_FCS_RECV, + CONF_STATE2_DEVICE, +}; #define L2CAP_CONF_MAX_CONF_REQ 2 #define L2CAP_CONF_MAX_CONF_RSP 2 -#define L2CAP_CONN_SAR_SDU 0x0001 -#define L2CAP_CONN_SREJ_SENT 0x0002 -#define L2CAP_CONN_WAIT_F 0x0004 -#define L2CAP_CONN_SREJ_ACT 0x0008 -#define L2CAP_CONN_SEND_PBIT 0x0010 -#define L2CAP_CONN_REMOTE_BUSY 0x0020 -#define L2CAP_CONN_LOCAL_BUSY 0x0040 -#define L2CAP_CONN_REJ_ACT 0x0080 -#define L2CAP_CONN_SEND_FBIT 0x0100 -#define L2CAP_CONN_RNR_SENT 0x0200 -#define L2CAP_CONN_SAR_RETRY 0x0400 
- -#define __mod_retrans_timer() mod_timer(&chan->retrans_timer, \ - jiffies + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO)); -#define __mod_monitor_timer() mod_timer(&chan->monitor_timer, \ - jiffies + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO)); -#define __mod_ack_timer() mod_timer(&chan->ack_timer, \ - jiffies + msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO)); +enum { + CONN_SAR_SDU, + CONN_SREJ_SENT, + CONN_WAIT_F, + CONN_SREJ_ACT, + CONN_SEND_PBIT, + CONN_REMOTE_BUSY, + CONN_LOCAL_BUSY, + CONN_REJ_ACT, + CONN_SEND_FBIT, + CONN_RNR_SENT, +}; + +#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) +#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) +#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \ + L2CAP_DEFAULT_RETRANS_TO); +#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) +#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \ + L2CAP_DEFAULT_MONITOR_TO); +#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) +#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ + L2CAP_DEFAULT_ACK_TO); +#define __clear_ack_timer(c) l2cap_clear_timer(c, &c->ack_timer) static inline int l2cap_tx_window_full(struct l2cap_chan *ch) { @@ -446,32 +501,17 @@ extern int disable_ertm; int l2cap_init_sockets(void); void l2cap_cleanup_sockets(void); -void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data); void __l2cap_connect_rsp_defer(struct l2cap_chan *chan); int __l2cap_wait_ack(struct sock *sk); -struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len); -struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len); -struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen); -int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len); -void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb); -void l2cap_streaming_send(struct l2cap_chan *chan); -int l2cap_ertm_send(struct l2cap_chan *chan); - int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm); int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid); -void l2cap_sock_set_timer(struct sock *sk, long timeout); -void l2cap_sock_clear_timer(struct sock *sk); -void __l2cap_sock_close(struct sock *sk, int reason); -void l2cap_sock_kill(struct sock *sk); -void l2cap_sock_init(struct sock *sk, struct sock *parent); -struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, - int proto, gfp_t prio); -void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err); struct l2cap_chan *l2cap_chan_create(struct sock *sk); -void l2cap_chan_del(struct l2cap_chan *chan, int err); +void l2cap_chan_close(struct l2cap_chan *chan, int reason); void l2cap_chan_destroy(struct l2cap_chan *chan); int l2cap_chan_connect(struct l2cap_chan *chan); +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len); +void l2cap_chan_busy(struct l2cap_chan *chan, int busy); #endif /* __L2CAP_H */ diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 4899286ed4e..5428fd32cce 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -101,6 +101,8 @@ struct mgmt_key_info { u8 type; u8 val[16]; u8 pin_len; + u8 dlen; + u8 data[0]; } __packed; #define MGMT_OP_LOAD_KEYS 0x000D @@ -199,6 +201,16 @@ struct mgmt_cp_remove_remote_oob_data { #define MGMT_OP_STOP_DISCOVERY 
0x001C +#define MGMT_OP_BLOCK_DEVICE 0x001D +struct mgmt_cp_block_device { + bdaddr_t bdaddr; +} __packed; + +#define MGMT_OP_UNBLOCK_DEVICE 0x001E +struct mgmt_cp_unblock_device { + bdaddr_t bdaddr; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 6eac4a760c3..d5eee2093b1 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -234,7 +234,8 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, /* ---- RFCOMM DLCs (channels) ---- */ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio); void rfcomm_dlc_free(struct rfcomm_dlc *d); -int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); +int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, + u8 channel); int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason); int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb); int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig); @@ -271,7 +272,8 @@ static inline void rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) } /* ---- RFCOMM sessions ---- */ -void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst); +void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, + bdaddr_t *dst); static inline void rfcomm_session_hold(struct rfcomm_session *s) { @@ -312,7 +314,8 @@ struct rfcomm_pinfo { int rfcomm_init_sockets(void); void rfcomm_cleanup_sockets(void); -int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d); +int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, + struct rfcomm_dlc **d); /* ---- RFCOMM TTY ---- */ #define RFCOMM_MAX_DEV 256 diff --git a/include/net/bluetooth/smp.h b/include/net/bluetooth/smp.h index 8f2edbf979d..46c45761230 100644 --- a/include/net/bluetooth/smp.h +++ b/include/net/bluetooth/smp.h @@ -1,3 +1,25 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + #ifndef __SMP_H #define __SMP_H @@ -16,6 +38,23 @@ struct smp_cmd_pairing { __u8 resp_key_dist; } __packed; +#define SMP_IO_DISPLAY_ONLY 0x00 +#define SMP_IO_DISPLAY_YESNO 0x01 +#define SMP_IO_KEYBOARD_ONLY 0x02 +#define SMP_IO_NO_INPUT_OUTPUT 0x03 +#define SMP_IO_KEYBOARD_DISPLAY 0x04 + +#define SMP_OOB_NOT_PRESENT 0x00 +#define SMP_OOB_PRESENT 0x01 + +#define SMP_DIST_ENC_KEY 0x01 +#define SMP_DIST_ID_KEY 0x02 +#define SMP_DIST_SIGN 0x04 + +#define SMP_AUTH_NONE 0x00 +#define SMP_AUTH_BONDING 0x01 +#define SMP_AUTH_MITM 0x04 + #define SMP_CMD_PAIRING_CONFIRM 0x03 struct smp_cmd_pairing_confirm { __u8 confirm_val[16]; @@ -73,4 +112,12 @@ struct smp_cmd_security_req { #define SMP_UNSPECIFIED 0x08 #define SMP_REPEATED_ATTEMPTS 0x09 +#define SMP_MIN_ENC_KEY_SIZE 7 +#define SMP_MAX_ENC_KEY_SIZE 16 + +/* SMP Commands */ +int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level); +int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); +int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); + #endif /* __SMP_H */ diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h new file mode 100644 index 00000000000..c5dedd87b4c --- /dev/null +++ b/include/net/caif/caif_hsi.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) ST-Ericsson AB 2010 + * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com + * Author: Daniel Martensson / daniel.martensson@stericsson.com + * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CAIF_HSI_H_ +#define CAIF_HSI_H_ + +#include <net/caif/caif_layer.h> +#include <net/caif/caif_device.h> +#include <linux/atomic.h> + +/* + * Maximum number of CAIF frames that can reside in the same HSI frame. + */ +#define CFHSI_MAX_PKTS 15 + +/* + * Maximum number of bytes used for the frame that can be embedded in the + * HSI descriptor. + */ +#define CFHSI_MAX_EMB_FRM_SZ 96 + +/* + * Decides if HSI buffers should be prefilled with 0xFF pattern for easier + * debugging. Both TX and RX buffers will be filled before the transfer. + */ +#define CFHSI_DBG_PREFILL 0 + +/* Structure describing a HSI packet descriptor. */ +#pragma pack(1) /* Byte alignment. */ +struct cfhsi_desc { + u8 header; + u8 offset; + u16 cffrm_len[CFHSI_MAX_PKTS]; + u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ]; +}; +#pragma pack() /* Default alignment. */ + +/* Size of the complete HSI packet descriptor. */ +#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc)) + +/* + * Size of the complete HSI packet descriptor excluding the optional embedded + * CAIF frame. + */ +#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ) + +/* + * Maximum bytes transferred in one transfer. + */ +/* TODO: 4096 is temporary... */ +#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * 4096) + +/* Size of the complete HSI TX buffer. */ +#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ) + +/* Size of the complete HSI RX buffer. */ +#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ) + +/* Bitmasks for the HSI descriptor. */ +#define CFHSI_PIGGY_DESC (0x01 << 7) + +#define CFHSI_TX_STATE_IDLE 0 +#define CFHSI_TX_STATE_XFER 1 + +#define CFHSI_RX_STATE_DESC 0 +#define CFHSI_RX_STATE_PAYLOAD 1 + +/* Bitmasks for power management. 
*/ +#define CFHSI_WAKE_UP 0 +#define CFHSI_WAKE_UP_ACK 1 +#define CFHSI_WAKE_DOWN_ACK 2 +#define CFHSI_AWAKE 3 +#define CFHSI_PENDING_RX 4 +#define CFHSI_SHUTDOWN 6 +#define CFHSI_FLUSH_FIFO 7 + +#ifndef CFHSI_INACTIVITY_TOUT +#define CFHSI_INACTIVITY_TOUT (1 * HZ) +#endif /* CFHSI_INACTIVITY_TOUT */ + +#ifndef CFHSI_WAKEUP_TOUT +#define CFHSI_WAKEUP_TOUT (3 * HZ) +#endif /* CFHSI_WAKEUP_TOUT */ + + +/* Structure implemented by the CAIF HSI driver. */ +struct cfhsi_drv { + void (*tx_done_cb) (struct cfhsi_drv *drv); + void (*rx_done_cb) (struct cfhsi_drv *drv); + void (*wake_up_cb) (struct cfhsi_drv *drv); + void (*wake_down_cb) (struct cfhsi_drv *drv); +}; + +/* Structure implemented by HSI device. */ +struct cfhsi_dev { + int (*cfhsi_up) (struct cfhsi_dev *dev); + int (*cfhsi_down) (struct cfhsi_dev *dev); + int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_dev *dev); + int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev); + int (*cfhsi_wake_up) (struct cfhsi_dev *dev); + int (*cfhsi_wake_down) (struct cfhsi_dev *dev); + int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy); + int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev); + struct cfhsi_drv *drv; +}; + +/* Structure implemented by CAIF HSI drivers. */ +struct cfhsi { + struct caif_dev_common cfdev; + struct net_device *ndev; + struct platform_device *pdev; + struct sk_buff_head qhead; + struct cfhsi_drv drv; + struct cfhsi_dev *dev; + int tx_state; + int rx_state; + int rx_len; + u8 *rx_ptr; + u8 *tx_buf; + u8 *rx_buf; + spinlock_t lock; + int flow_off_sent; + u32 q_low_mark; + u32 q_high_mark; + struct list_head list; + struct work_struct wake_up_work; + struct work_struct wake_down_work; + struct work_struct rx_done_work; + struct work_struct tx_done_work; + struct workqueue_struct *wq; + wait_queue_head_t wake_up_wait; + wait_queue_head_t wake_down_wait; + wait_queue_head_t flush_fifo_wait; + struct timer_list timer; + unsigned long bits; +}; + +extern struct platform_driver cfhsi_driver; + +#endif /* CAIF_HSI_H_ */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 396e8fc8910..d17f47fc9e3 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -777,6 +777,7 @@ struct cfg80211_ssid { * @n_channels: total number of channels to scan * @ie: optional information element(s) to add into Probe Request or %NULL * @ie_len: length of ie in octets + * @rates: bitmap of rates to advertise for each band * @wiphy: the wiphy this was for * @dev: the interface * @aborted: (internal) scan request was notified as aborted @@ -788,6 +789,8 @@ struct cfg80211_scan_request { const u8 *ie; size_t ie_len; + u32 rates[IEEE80211_NUM_BANDS]; + /* internal */ struct wiphy *wiphy; struct net_device *dev; @@ -1146,14 +1149,32 @@ struct cfg80211_wowlan_trig_pkt_pattern { * @magic_pkt: wake up on receiving magic packet * @patterns: wake up on receiving packet matching a pattern * @n_patterns: number of patterns + * @gtk_rekey_failure: wake up on GTK rekey failure + * @eap_identity_req: wake up on EAP identity request packet + * @four_way_handshake: wake up on 4-way handshake + * @rfkill_release: wake up when rfkill is released */ struct cfg80211_wowlan { - bool any, disconnect, magic_pkt; + bool any, disconnect, magic_pkt, gtk_rekey_failure, + eap_identity_req, four_way_handshake, + rfkill_release; struct cfg80211_wowlan_trig_pkt_pattern *patterns; int n_patterns; }; /** + * struct cfg80211_gtk_rekey_data - rekey data + * @kek: key encryption key + * @kck: key confirmation key + * @replay_ctr: replay counter + */ 
+struct cfg80211_gtk_rekey_data { + u8 kek[NL80211_KEK_LEN]; + u8 kck[NL80211_KCK_LEN]; + u8 replay_ctr[NL80211_REPLAY_CTR_LEN]; +}; + +/** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks @@ -1197,6 +1218,8 @@ struct cfg80211_wowlan { * * @set_default_mgmt_key: set the default management frame key on an interface * + * @set_rekey_data: give the data necessary for GTK rekeying to the driver + * * @add_beacon: Add a beacon with given parameters, @head, @interval * and @dtim_period will be valid, @tail is optional. * @set_beacon: Change the beacon parameters for an access point mode @@ -1284,6 +1307,12 @@ struct cfg80211_wowlan { * frame on another channel * * @testmode_cmd: run a test mode command + * @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be + * used by the function, but 0 and 1 must not be touched. Additionally, + * return error codes other than -ENOBUFS and -ENOENT will terminate the + * dump and return to userspace with an error, so be careful. If any data + * was passed in from userspace then the data/len arguments will be present + * and point to the data contained in %NL80211_ATTR_TESTDATA. * * @set_bitrate_mask: set the bitrate mask configuration * @@ -1433,6 +1462,9 @@ struct cfg80211_ops { #ifdef CONFIG_NL80211_TESTMODE int (*testmode_cmd)(struct wiphy *wiphy, void *data, int len); + int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len); #endif int (*set_bitrate_mask)(struct wiphy *wiphy, @@ -1490,6 +1522,9 @@ struct cfg80211_ops { struct net_device *dev, struct cfg80211_sched_scan_request *request); int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev); + + int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_gtk_rekey_data *data); }; /* @@ -1647,11 +1682,21 @@ struct ieee80211_txrx_stypes { * @WIPHY_WOWLAN_MAGIC_PKT: supports wakeup on magic packet * (see nl80211.h) * @WIPHY_WOWLAN_DISCONNECT: supports wakeup on disconnect + * @WIPHY_WOWLAN_SUPPORTS_GTK_REKEY: supports GTK rekeying while asleep + * @WIPHY_WOWLAN_GTK_REKEY_FAILURE: supports wakeup on GTK rekey failure + * @WIPHY_WOWLAN_EAP_IDENTITY_REQ: supports wakeup on EAP identity request + * @WIPHY_WOWLAN_4WAY_HANDSHAKE: supports wakeup on 4-way handshake failure + * @WIPHY_WOWLAN_RFKILL_RELEASE: supports wakeup on RF-kill release */ enum wiphy_wowlan_support_flags { - WIPHY_WOWLAN_ANY = BIT(0), - WIPHY_WOWLAN_MAGIC_PKT = BIT(1), - WIPHY_WOWLAN_DISCONNECT = BIT(2), + WIPHY_WOWLAN_ANY = BIT(0), + WIPHY_WOWLAN_MAGIC_PKT = BIT(1), + WIPHY_WOWLAN_DISCONNECT = BIT(2), + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY = BIT(3), + WIPHY_WOWLAN_GTK_REKEY_FAILURE = BIT(4), + WIPHY_WOWLAN_EAP_IDENTITY_REQ = BIT(5), + WIPHY_WOWLAN_4WAY_HANDSHAKE = BIT(6), + WIPHY_WOWLAN_RFKILL_RELEASE = BIT(7), }; /** @@ -1716,9 +1761,13 @@ struct wiphy_wowlan_support { * this variable determines its size * @max_scan_ssids: maximum number of SSIDs the device can scan for in * any given scan + * @max_sched_scan_ssids: maximum number of SSIDs the device can scan + * for in any given scheduled scan * @max_scan_ie_len: maximum length of user-controlled IEs device can * add to probe request frames transmitted during a scan, must not * include fixed IEs like supported rates + * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled + * scans * @coverage_class: current coverage class * @fw_version: firmware version for ethtool 
reporting * @hw_version: hardware version for ethtool reporting @@ -1770,7 +1819,9 @@ struct wiphy { int bss_priv_size; u8 max_scan_ssids; + u8 max_sched_scan_ssids; u16 max_scan_ie_len; + u16 max_sched_scan_ie_len; int n_cipher_suites; const u32 *cipher_suites; @@ -2849,8 +2900,10 @@ struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp); #define CFG80211_TESTMODE_CMD(cmd) .testmode_cmd = (cmd), +#define CFG80211_TESTMODE_DUMP(cmd) .testmode_dump = (cmd), #else #define CFG80211_TESTMODE_CMD(cmd) +#define CFG80211_TESTMODE_DUMP(cmd) #endif /** @@ -3022,6 +3075,16 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, void cfg80211_cqm_pktloss_notify(struct net_device *dev, const u8 *peer, u32 num_packets, gfp_t gfp); +/** + * cfg80211_gtk_rekey_notify - notify userspace about driver rekeying + * @dev: network device + * @bssid: BSSID of AP (to avoid races) + * @replay_ctr: new replay counter + * @gfp: allocation flags + */ +void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid, + const u8 *replay_ctr, gfp_t gfp); + /* Logging, debugging and troubleshooting/diagnostic helpers. */ /* wiphy_printk helpers, similar to dev_printk */ diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index e5983c9053d..f5aa39997f0 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -28,8 +28,16 @@ struct dcb_app_type { struct list_head list; }; -u8 dcb_setapp(struct net_device *, struct dcb_app *); +int dcb_setapp(struct net_device *, struct dcb_app *); u8 dcb_getapp(struct net_device *, struct dcb_app *); +int dcb_ieee_setapp(struct net_device *, struct dcb_app *); +int dcb_ieee_delapp(struct net_device *, struct dcb_app *); +u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *); + +int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, + u32 seq, u32 pid); +int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, + u32 seq, u32 pid); /* * Ops struct for the netlink callbacks. 
Used by DCB-enabled drivers through @@ -43,6 +51,7 @@ struct dcbnl_rtnl_ops { int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_getapp) (struct net_device *, struct dcb_app *); int (*ieee_setapp) (struct net_device *, struct dcb_app *); + int (*ieee_delapp) (struct net_device *, struct dcb_app *); int (*ieee_peer_getets) (struct net_device *, struct ieee_ets *); int (*ieee_peer_getpfc) (struct net_device *, struct ieee_pfc *); diff --git a/include/net/dst.h b/include/net/dst.h index e12ddfb9eb1..29e255796ce 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -37,8 +37,7 @@ struct dst_entry { unsigned long _metrics; unsigned long expires; struct dst_entry *path; - struct neighbour *neighbour; - struct hh_cache *hh; + struct neighbour *_neighbour; #ifdef CONFIG_XFRM struct xfrm_state *xfrm; #else @@ -47,6 +46,14 @@ struct dst_entry { int (*input)(struct sk_buff*); int (*output)(struct sk_buff*); + int flags; +#define DST_HOST 0x0001 +#define DST_NOXFRM 0x0002 +#define DST_NOPOLICY 0x0004 +#define DST_NOHASH 0x0008 +#define DST_NOCACHE 0x0010 +#define DST_NOCOUNT 0x0020 + short error; short obsolete; unsigned short header_len; /* more space at head required */ @@ -62,7 +69,7 @@ struct dst_entry { * (L1_CACHE_SIZE would be too much) */ #ifdef CONFIG_64BIT - long __pad_to_align_refcnt[1]; + long __pad_to_align_refcnt[2]; #endif /* * __refcnt wants to be on a different cache line from @@ -71,13 +78,6 @@ struct dst_entry { atomic_t __refcnt; /* client references */ int __use; unsigned long lastuse; - int flags; -#define DST_HOST 0x0001 -#define DST_NOXFRM 0x0002 -#define DST_NOPOLICY 0x0004 -#define DST_NOHASH 0x0008 -#define DST_NOCACHE 0x0010 -#define DST_NOCOUNT 0x0020 union { struct dst_entry *next; struct rtable __rcu *rt_next; @@ -86,6 +86,16 @@ struct dst_entry { }; }; +static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst) +{ + return dst->_neighbour; +} + +static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh) +{ + dst->_neighbour = neigh; +} + extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); extern const u32 dst_default_metrics[RTAX_MAX]; @@ -371,8 +381,15 @@ static inline void dst_rcu_free(struct rcu_head *head) static inline void dst_confirm(struct dst_entry *dst) { - if (dst) - neigh_confirm(dst->neighbour); + if (dst) { + struct neighbour *n = dst_get_neighbour(dst); + neigh_confirm(n); + } +} + +static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) +{ + return dst->ops->neigh_lookup(dst, daddr); } static inline void dst_link_failure(struct sk_buff *skb) diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index dc074632894..9adb99845a5 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -26,6 +26,7 @@ struct dst_ops { void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *dst, u32 mtu); int (*local_out)(struct sk_buff *skb); + struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, const void *daddr); struct kmem_cache *kmem_cachep; diff --git a/include/net/genetlink.h b/include/net/genetlink.h index d420f28b6d6..82d8d09faa4 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -160,6 +160,38 @@ static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, } /** + * genlmsg_nlhdr - Obtain netlink header from user specified header + * @user_hdr: user header as returned from genlmsg_put() + * @family: generic netlink family + * + * Returns pointer to netlink 
header. + */ +static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr, + struct genl_family *family) +{ + return (struct nlmsghdr *)((char *)user_hdr - + family->hdrsize - + GENL_HDRLEN - + NLMSG_HDRLEN); +} + +/** + * genl_dump_check_consistent - check if sequence is consistent and advertise if not + * @cb: netlink callback structure that stores the sequence number + * @user_hdr: user header as returned from genlmsg_put() + * @family: generic netlink family + * + * Cf. nl_dump_check_consistent(), this just provides a wrapper to make it + * simpler to use with generic netlink. + */ +static inline void genl_dump_check_consistent(struct netlink_callback *cb, + void *user_hdr, + struct genl_family *family) +{ + nl_dump_check_consistent(cb, genlmsg_nlhdr(user_hdr, family)); +} + +/** * genlmsg_put_reply - Add generic netlink header to a reply message * @skb: socket buffer holding the message * @info: receiver info diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 8a159cc3d68..4233e6f9841 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -32,13 +32,17 @@ struct inet_peer { struct inet_peer __rcu *avl_left, *avl_right; struct inetpeer_addr daddr; __u32 avl_height; - struct list_head unused; - __u32 dtime; /* the time of last use of not - * referenced entries */ - atomic_t refcnt; + + u32 metrics[RTAX_MAX]; + u32 rate_tokens; /* rate limiting for ICMP */ + unsigned long rate_last; + unsigned long pmtu_expires; + u32 pmtu_orig; + u32 pmtu_learned; + struct inetpeer_addr_base redirect_learned; /* * Once inet_peer is queued for deletion (refcnt == -1), following fields - * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics + * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp * We can share memory with rcu_head to help keep inet_peer small. 
*/ union { @@ -47,16 +51,14 @@ struct inet_peer { atomic_t ip_id_count; /* IP ID for the next packet */ __u32 tcp_ts; __u32 tcp_ts_stamp; - u32 metrics[RTAX_MAX]; - u32 rate_tokens; /* rate limiting for ICMP */ - unsigned long rate_last; - unsigned long pmtu_expires; - u32 pmtu_orig; - u32 pmtu_learned; - struct inetpeer_addr_base redirect_learned; }; struct rcu_head rcu; + struct inet_peer *gc_next; }; + + /* following fields might be frequently dirtied */ + __u32 dtime; /* the time of last use of not referenced entries */ + atomic_t refcnt; }; void inet_initpeers(void) __init; @@ -69,7 +71,7 @@ static inline bool inet_metrics_new(const struct inet_peer *p) } /* can be called with or without local BH being disabled */ -struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create); +struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create); static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create) { @@ -104,11 +106,18 @@ static inline void inet_peer_refcheck(const struct inet_peer *p) /* can be called with or without local BH being disabled */ -static inline __u16 inet_getid(struct inet_peer *p, int more) +static inline int inet_getid(struct inet_peer *p, int more) { + int old, new; more++; inet_peer_refcheck(p); - return atomic_add_return(more, &p->ip_id_count) - more; + do { + old = atomic_read(&p->ip_id_count); + new = old + more; + if (!new) + new = 1; + } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old); + return new; } #endif /* _NET_INETPEER_H */ diff --git a/include/net/ip.h b/include/net/ip.h index 66dd4914920..aa76c7a4d9c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -228,8 +228,6 @@ extern struct ctl_path net_ipv4_ctl_path[]; extern int inet_peer_threshold; extern int inet_peer_minttl; extern int inet_peer_maxttl; -extern int inet_peer_gc_mintime; -extern int inet_peer_gc_maxtime; /* From ip_output.c */ extern int sysctl_ip_dynaddr; @@ -238,6 +236,11 @@ extern void ipfrag_init(void); extern void ip_static_sysctl_init(void); +static inline bool ip_is_fragment(const struct iphdr *iph) +{ + return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0; +} + #ifdef CONFIG_INET #include <net/dst.h> @@ -401,7 +404,8 @@ enum ip_defrag_users { __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, IP_DEFRAG_VS_IN, IP_DEFRAG_VS_OUT, - IP_DEFRAG_VS_FWD + IP_DEFRAG_VS_FWD, + IP_DEFRAG_AF_PACKET, }; int ip_defrag(struct sk_buff *skb, u32 user); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 477ef75f387..5735a0f979c 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -87,7 +87,6 @@ struct rt6_info { struct dst_entry dst; #define rt6i_dev dst.dev -#define rt6i_nexthop dst.neighbour #define rt6i_expires dst.expires /* diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 481f856c650..b1370c4015b 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -836,8 +836,6 @@ struct netns_ipvs { int num_services; /* no of virtual services */ rwlock_t rs_lock; /* real services table */ - /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. 
*/ - struct lock_class_key ctl_key; /* ctl_mutex debuging */ /* Trash for destinations */ struct list_head dest_trash; /* Service counters */ @@ -1089,19 +1087,19 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) /* * IPVS netns init & cleanup functions */ -extern int __ip_vs_estimator_init(struct net *net); -extern int __ip_vs_control_init(struct net *net); -extern int __ip_vs_protocol_init(struct net *net); -extern int __ip_vs_app_init(struct net *net); -extern int __ip_vs_conn_init(struct net *net); -extern int __ip_vs_sync_init(struct net *net); -extern void __ip_vs_conn_cleanup(struct net *net); -extern void __ip_vs_app_cleanup(struct net *net); -extern void __ip_vs_protocol_cleanup(struct net *net); -extern void __ip_vs_control_cleanup(struct net *net); -extern void __ip_vs_estimator_cleanup(struct net *net); -extern void __ip_vs_sync_cleanup(struct net *net); -extern void __ip_vs_service_cleanup(struct net *net); +extern int ip_vs_estimator_net_init(struct net *net); +extern int ip_vs_control_net_init(struct net *net); +extern int ip_vs_protocol_net_init(struct net *net); +extern int ip_vs_app_net_init(struct net *net); +extern int ip_vs_conn_net_init(struct net *net); +extern int ip_vs_sync_net_init(struct net *net); +extern void ip_vs_conn_net_cleanup(struct net *net); +extern void ip_vs_app_net_cleanup(struct net *net); +extern void ip_vs_protocol_net_cleanup(struct net *net); +extern void ip_vs_control_net_cleanup(struct net *net); +extern void ip_vs_estimator_net_cleanup(struct net *net); +extern void ip_vs_sync_net_cleanup(struct net *net); +extern void ip_vs_service_net_cleanup(struct net *net); /* * IPVS application functions @@ -1119,8 +1117,6 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc); extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb); extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb); -extern int ip_vs_app_init(void); -extern void ip_vs_app_cleanup(void); void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe); void ip_vs_unbind_pe(struct ip_vs_service *svc); @@ -1223,15 +1219,11 @@ extern int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); extern int stop_sync_thread(struct net *net, int state); extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp); -extern int ip_vs_sync_init(void); -extern void ip_vs_sync_cleanup(void); /* * IPVS rate estimator prototypes (from ip_vs_est.c) */ -extern int ip_vs_estimator_init(void); -extern void ip_vs_estimator_cleanup(void); extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats); extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats); extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c033ed00df7..3b5ac1fbff3 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -463,17 +463,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } -static __inline__ void ipv6_select_ident(struct frag_hdr *fhdr) -{ - static u32 ipv6_fragmentation_id = 1; - static DEFINE_SPINLOCK(ip6_id_lock); - - spin_lock_bh(&ip6_id_lock); - fhdr->identification = htonl(ipv6_fragmentation_id); - if (++ipv6_fragmentation_id == 0) - ipv6_fragmentation_id = 1; - spin_unlock_bh(&ip6_id_lock); -} +extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); /* * Prototypes exported by ipv6 diff --git a/include/net/mac80211.h 
b/include/net/mac80211.h index e6d6a66a8f7..9259e97864d 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -20,6 +20,7 @@ #include <linux/device.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> +#include <asm/unaligned.h> /** * DOC: Introduction @@ -193,6 +194,17 @@ enum ieee80211_bss_change { #define IEEE80211_BSS_ARP_ADDR_LIST_LEN 4 /** + * enum ieee80211_rssi_event - RSSI threshold event + * An indicator for when RSSI goes below/above a certain threshold. + * @RSSI_EVENT_HIGH: AP's rssi crossed the high threshold set by the driver. + * @RSSI_EVENT_LOW: AP's rssi crossed the low threshold set by the driver. + */ +enum ieee80211_rssi_event { + RSSI_EVENT_HIGH, + RSSI_EVENT_LOW, +}; + +/** * struct ieee80211_bss_conf - holds the BSS's changing parameters * * This structure keeps information about a BSS (and an association @@ -933,6 +945,7 @@ enum set_key_cmd { * @aid: AID we assigned to the station if we're an AP * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own TX capabilities + * @wme: indicates whether the STA supports WME. Only valid during AP-mode. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. */ @@ -941,6 +954,7 @@ struct ieee80211_sta { u8 addr[ETH_ALEN]; u16 aid; struct ieee80211_sta_ht_cap ht_cap; + bool wme; /* must be last */ u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); @@ -960,21 +974,6 @@ enum sta_notify_cmd { }; /** - * enum ieee80211_tkip_key_type - get tkip key - * - * Used by drivers which need to get a tkip key for skb. Some drivers need a - * phase 1 key, others need a phase 2 key. A single function allows the driver - * to get the key, this enum indicates what type of key is required. - * - * @IEEE80211_TKIP_P1_KEY: the driver needs a phase 1 key - * @IEEE80211_TKIP_P2_KEY: the driver needs a phase 2 key - */ -enum ieee80211_tkip_key_type { - IEEE80211_TKIP_P1_KEY, - IEEE80211_TKIP_P2_KEY, -}; - -/** * enum ieee80211_hw_flags - hardware flags * * These flags are used to indicate hardware capabilities to @@ -1587,6 +1586,20 @@ enum ieee80211_ampdu_mlme_action { }; /** + * enum ieee80211_tx_sync_type - TX sync type + * @IEEE80211_TX_SYNC_AUTH: sync TX for authentication + * (and possibly also before direct probe) + * @IEEE80211_TX_SYNC_ASSOC: sync TX for association + * @IEEE80211_TX_SYNC_ACTION: sync TX for action frame + * (not implemented yet) + */ +enum ieee80211_tx_sync_type { + IEEE80211_TX_SYNC_AUTH, + IEEE80211_TX_SYNC_ASSOC, + IEEE80211_TX_SYNC_ACTION, +}; + +/** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may @@ -1626,6 +1639,10 @@ enum ieee80211_ampdu_mlme_action { * ask the device to suspend. This is only invoked when WoWLAN is * configured, otherwise the device is deconfigured completely and * reconfigured at resume time. + * The driver may also impose special conditions under which it + * wants to use the "normal" suspend (deconfigure), say if it only + * supports WoWLAN when the device is associated. In this case, it + * must return 1 from this function. * * @resume: If WoWLAN was configured, this indicates that mac80211 is * now resuming its operation, after this the device must be fully @@ -1671,6 +1688,26 @@ enum ieee80211_ampdu_mlme_action { * of the bss parameters has changed when a call is made. The callback * can sleep. * + * @tx_sync: Called before a frame is sent to an AP/GO. 
In the GO case, the + * driver should sync with the GO's powersaving so the device doesn't + * transmit the frame while the GO is asleep. In the regular AP case + * it may be used by drivers for devices implementing other restrictions + * on talking to APs, e.g. due to regulatory enforcement or just HW + * restrictions. + * This function is called for every authentication, association and + * action frame separately since applications might attempt to auth + * with multiple APs before chosing one to associate to. If it returns + * an error, the corresponding authentication, association or frame + * transmission is aborted and reported as having failed. It is always + * called after tuning to the correct channel. + * The callback might be called multiple times before @finish_tx_sync + * (but @finish_tx_sync will be called once for each) but in practice + * this is unlikely to happen. It can also refuse in that case if the + * driver cannot handle that situation. + * This callback can sleep. + * @finish_tx_sync: Called as a counterpart to @tx_sync, unless that returned + * an error. This callback can sleep. + * * @prepare_multicast: Prepare for multicast filter configuration. * This callback is optional, and its return value is passed * to configure_filter(). This callback must be atomic. @@ -1694,6 +1731,12 @@ enum ieee80211_ampdu_mlme_action { * which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY. * The callback must be atomic. * + * @set_rekey_data: If the device supports GTK rekeying, for example while the + * host is suspended, it can assign this callback to retrieve the data + * necessary to do GTK rekeying, this is the KEK, KCK and replay counter. + * After rekeying was done it should (for example during resume) notify + * userspace of the new replay counter using ieee80211_gtk_rekey_notify(). + * * @hw_scan: Ask the hardware to service the scan request, no need to start * the scan state machine in stack. The scan must honour the channel * configuration done by the regulatory agent in the wiphy's @@ -1708,6 +1751,14 @@ enum ieee80211_ampdu_mlme_action { * any error unless this callback returned a negative error code. * The callback can sleep. * + * @cancel_hw_scan: Ask the low-level tp cancel the active hw scan. + * The driver should ask the hardware to cancel the scan (if possible), + * but the scan will be completed only after the driver will call + * ieee80211_scan_completed(). + * This callback is needed for wowlan, to prevent enqueueing a new + * scan_work after the low-level driver was already suspended. + * The callback can sleep. + * * @sched_scan_start: Ask the hardware to start scanning repeatedly at * specific intervals. The driver must call the * ieee80211_sched_scan_results() function whenever it finds results. @@ -1816,6 +1867,7 @@ enum ieee80211_ampdu_mlme_action { * * @testmode_cmd: Implement a cfg80211 test mode command. * The callback can sleep. + * @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep. * * @flush: Flush all pending frames from the hardware queue, making sure * that the hardware queues are empty. If the parameter @drop is set @@ -1860,6 +1912,8 @@ enum ieee80211_ampdu_mlme_action { * @set_bitrate_mask: Set a mask of rates to be used for rate control selection * when transmitting a frame. Currently only legacy rates are handled. * The callback can sleep. + * @rssi_callback: Notify driver when the average RSSI goes above/below + * thresholds that were registered previously. The callback can sleep. 
*/ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); @@ -1881,6 +1935,14 @@ struct ieee80211_ops { struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u32 changed); + + int (*tx_sync)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *bssid, enum ieee80211_tx_sync_type type); + void (*finish_tx_sync)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *bssid, + enum ieee80211_tx_sync_type type); + u64 (*prepare_multicast)(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list); void (*configure_filter)(struct ieee80211_hw *hw, @@ -1897,8 +1959,13 @@ struct ieee80211_ops { struct ieee80211_key_conf *conf, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); + void (*set_rekey_data)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data); int (*hw_scan)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_scan_request *req); + void (*cancel_hw_scan)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); int (*sched_scan_start)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, @@ -1936,6 +2003,9 @@ struct ieee80211_ops { void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class); #ifdef CONFIG_NL80211_TESTMODE int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len); + int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb, + struct netlink_callback *cb, + void *data, int len); #endif void (*flush)(struct ieee80211_hw *hw, bool drop); void (*channel_switch)(struct ieee80211_hw *hw, @@ -1960,6 +2030,8 @@ struct ieee80211_ops { bool (*tx_frames_pending)(struct ieee80211_hw *hw); int (*set_bitrate_mask)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask); + void (*rssi_callback)(struct ieee80211_hw *hw, + enum ieee80211_rssi_event rssi_event); }; /** @@ -2550,21 +2622,136 @@ struct sk_buff * ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif); /** - * ieee80211_get_tkip_key - get a TKIP rc4 for skb + * ieee80211_get_tkip_p1k_iv - get a TKIP phase 1 key for IV32 + * + * This function returns the TKIP phase 1 key for the given IV32. + * + * @keyconf: the parameter passed with the set key + * @iv32: IV32 to get the P1K for + * @p1k: a buffer to which the key will be written, as 5 u16 values + */ +void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf, + u32 iv32, u16 *p1k); + +/** + * ieee80211_get_tkip_p1k - get a TKIP phase 1 key + * + * This function returns the TKIP phase 1 key for the IV32 taken + * from the given packet. + * + * @keyconf: the parameter passed with the set key + * @skb: the packet to take the IV32 value from that will be encrypted + * with this P1K + * @p1k: a buffer to which the key will be written, as 5 u16 values + */ +static inline void ieee80211_get_tkip_p1k(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, u16 *p1k) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); + u32 iv32 = get_unaligned_le32(&data[4]); + + ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k); +} + +/** + * ieee80211_get_tkip_rx_p1k - get a TKIP phase 1 key for RX + * + * This function returns the TKIP phase 1 key for the given IV32 + * and transmitter address. 
+ * + * @keyconf: the parameter passed with the set key + * @ta: TA that will be used with the key + * @iv32: IV32 to get the P1K for + * @p1k: a buffer to which the key will be written, as 5 u16 values + */ +void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, + const u8 *ta, u32 iv32, u16 *p1k); + +/** + * ieee80211_get_tkip_p2k - get a TKIP phase 2 key + * + * This function computes the TKIP RC4 key for the IV values + * in the packet. + * + * @keyconf: the parameter passed with the set key + * @skb: the packet to take the IV32/IV16 values from that will be + * encrypted with this key + * @p2k: a buffer to which the key will be written, 16 bytes + */ +void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, + struct sk_buff *skb, u8 *p2k); + +/** + * struct ieee80211_key_seq - key sequence counter * - * This function computes a TKIP rc4 key for an skb. It computes - * a phase 1 key if needed (iv16 wraps around). This function is to - * be used by drivers which can do HW encryption but need to compute - * to phase 1/2 key in SW. + * @tkip: TKIP data, containing IV32 and IV16 in host byte order + * @ccmp: PN data, most significant byte first (big endian, + * reverse order than in packet) + * @aes_cmac: PN data, most significant byte first (big endian, + * reverse order than in packet) + */ +struct ieee80211_key_seq { + union { + struct { + u32 iv32; + u16 iv16; + } tkip; + struct { + u8 pn[6]; + } ccmp; + struct { + u8 pn[6]; + } aes_cmac; + }; +}; + +/** + * ieee80211_get_key_tx_seq - get key TX sequence counter + * + * @keyconf: the parameter passed with the set key + * @seq: buffer to receive the sequence data + * + * This function allows a driver to retrieve the current TX IV/PN + * for the given key. It must not be called if IV generation is + * offloaded to the device. + * + * Note that this function may only be called when no TX processing + * can be done concurrently, for example when queues are stopped + * and the stop has been synchronized. + */ +void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, + struct ieee80211_key_seq *seq); + +/** + * ieee80211_get_key_rx_seq - get key RX sequence counter * * @keyconf: the parameter passed with the set key - * @skb: the skb for which the key is needed - * @type: TBD - * @key: a buffer to which the key will be written + * @tid: The TID, or -1 for the management frame value (CCMP only); + * the value on TID 0 is also used for non-QoS frames. For + * CMAC, only TID 0 is valid. + * @seq: buffer to receive the sequence data + * + * This function allows a driver to retrieve the current RX IV/PNs + * for the given key. It must not be called if IV checking is done + * by the device and not by mac80211. + * + * Note that this function may only be called when no RX processing + * can be done concurrently. 
*/ -void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, - struct sk_buff *skb, - enum ieee80211_tkip_key_type type, u8 *key); +void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, + int tid, struct ieee80211_key_seq *seq); + +/** + * ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying + * @vif: virtual interface the rekeying was done on + * @bssid: The BSSID of the AP, for checking association + * @replay_ctr: the new replay counter after GTK rekeying + * @gfp: allocation flags + */ +void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, + const u8 *replay_ctr, gfp_t gfp); + /** * ieee80211_wake_queue - wake specific queue * @hw: pointer as obtained from ieee80211_alloc_hw(). @@ -2830,6 +3017,33 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, bool block); /** + * ieee80211_iter_keys - iterate keys programmed into the device + * @hw: pointer obtained from ieee80211_alloc_hw() + * @vif: virtual interface to iterate, may be %NULL for all + * @iter: iterator function that will be called for each key + * @iter_data: custom data to pass to the iterator function + * + * This function can be used to iterate all the keys known to + * mac80211, even those that weren't previously programmed into + * the device. This is intended for use in WoWLAN if the device + * needs reprogramming of the keys during suspend. Note that due + * to locking reasons, it is also only safe to call this at few + * spots since it must hold the RTNL and be able to sleep. + * + * The order in which the keys are iterated matches the order + * in which they were originally installed and handed to the + * set_key callback. + */ +void ieee80211_iter_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data), + void *iter_data); + +/** * ieee80211_ap_probereq_get - retrieve a Probe Request template * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. @@ -2870,6 +3084,29 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif); void ieee80211_connection_loss(struct ieee80211_vif *vif); /** + * ieee80211_resume_disconnect - disconnect from AP after resume + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Instructs mac80211 to disconnect from the AP after resume. + * Drivers can use this after WoWLAN if they know that the + * connection cannot be kept up, for example because keys were + * used while the device was asleep but the replay counters or + * similar cannot be retrieved from the device during resume. + * + * Note that due to implementation issues, if the driver uses + * the reconfiguration functionality during resume the interface + * will still be added as associated first during resume and then + * disconnect normally later. + * + * This function can only be called from the resume callback and + * the driver must not be holding any of its own locks while it + * calls this function, or at least not any locks it needs in the + * key configuration paths (if it supports HW crypto). + */ +void ieee80211_resume_disconnect(struct ieee80211_vif *vif); + +/** * ieee80211_disable_dyn_ps - force mac80211 to temporarily disable dynamic psm * * @vif: &struct ieee80211_vif pointer from the add_interface callback. 
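The WoWLAN helpers added above (ieee80211_iter_keys(), the ieee80211_key_seq getters and ieee80211_resume_disconnect()) are meant to be combined in a driver's resume path roughly as follows. This is an illustrative sketch only, under the stated assumptions: the foo_* driver, its private structure and its fields are hypothetical and not part of this patch.

#include <net/mac80211.h>

struct foo_priv {				/* hypothetical driver state */
	struct ieee80211_vif *vif;
	bool replay_counters_lost;
};

static void foo_reprogram_key(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta,
			      struct ieee80211_key_conf *key,
			      void *data)
{
	struct ieee80211_key_seq seq;
	int tid;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;		/* sketch: only CCMP counters shown */

	/* Fetch mac80211's current per-TID RX PN so the device can be
	 * reprogrammed with up-to-date counters after the host slept. */
	for (tid = 0; tid < 16; tid++)
		ieee80211_get_key_rx_seq(key, tid, &seq);

	/* ... upload key material and counters to the hardware ... */
}

static int foo_resume(struct ieee80211_hw *hw)
{
	struct foo_priv *priv = hw->priv;

	/* Safe here: the resume callback holds the RTNL and may sleep,
	 * which ieee80211_iter_keys() requires. */
	ieee80211_iter_keys(hw, priv->vif, foo_reprogram_key, priv);

	/* If the replay counters could not be recovered from the device,
	 * the documented fallback is to tear the connection down. */
	if (priv->replay_counters_lost)
		ieee80211_resume_disconnect(priv->vif);

	return 0;
}

If the device performed GTK rekeying on the host's behalf while asleep, such a driver would additionally report the new replay counter with ieee80211_gtk_rekey_notify(), as described in the kerneldoc above.
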
@@ -2916,6 +3153,16 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, gfp_t gfp); /** + * ieee80211_get_operstate - get the operstate of the vif + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * The driver might need to know the operstate of the net_device + * (specifically, whether the link is IF_OPER_UP after resume) + */ +unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif); + +/** * ieee80211_chswitch_done - Complete channel switch process * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @success: make the channel switch successful or not @@ -2965,6 +3212,23 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw); */ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw); +/** + * ieee80211_stop_rx_ba_session - callback to stop existing BA sessions + * + * in order not to harm the system performance and user experience, the device + * may request not to allow any rx ba session and tear down existing rx ba + * sessions based on system constraints such as periodic BT activity that needs + * to limit wlan activity (eg.sco or a2dp)." + * in such cases, the intention is to limit the duration of the rx ppdu and + * therefore prevent the peer device to use a-mpdu aggregation. + * + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @ba_rx_bitmap: Bit map of open rx ba per tid + * @addr: & to bssid mac address + */ +void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, + const u8 *addr); + /* Rate control API */ /** @@ -3150,4 +3414,9 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif) return ieee80211_iftype_p2p(vif->type, vif->p2p); } +void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, + int rssi_min_thold, + int rssi_max_thold); + +void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif); #endif /* MAC80211_H */ diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 4014b623880..4ba8521490b 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -108,8 +108,8 @@ struct neighbour { __u8 dead; seqlock_t ha_lock; unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; - struct hh_cache *hh; - int (*output)(struct sk_buff *skb); + struct hh_cache hh; + int (*output)(struct neighbour *, struct sk_buff *); const struct neigh_ops *ops; struct rcu_head rcu; struct net_device *dev; @@ -118,12 +118,10 @@ struct neighbour { struct neigh_ops { int family; - void (*solicit)(struct neighbour *, struct sk_buff*); - void (*error_report)(struct neighbour *, struct sk_buff*); - int (*output)(struct sk_buff*); - int (*connected_output)(struct sk_buff*); - int (*hh_output)(struct sk_buff*); - int (*queue_xmit)(struct sk_buff*); + void (*solicit)(struct neighbour *, struct sk_buff *); + void (*error_report)(struct neighbour *, struct sk_buff *); + int (*output)(struct neighbour *, struct sk_buff *); + int (*connected_output)(struct neighbour *, struct sk_buff *); }; struct pneigh_entry { @@ -142,7 +140,7 @@ struct pneigh_entry { struct neigh_hash_table { struct neighbour __rcu **hash_buckets; - unsigned int hash_mask; + unsigned int hash_shift; __u32 hash_rnd; struct rcu_head rcu; }; @@ -205,9 +203,10 @@ extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags); extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); -extern int neigh_resolve_output(struct sk_buff *skb); -extern int 
neigh_connected_output(struct sk_buff *skb); -extern int neigh_compat_output(struct sk_buff *skb); +extern int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb); +extern int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb); +extern int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb); +extern int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb); extern struct neighbour *neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, struct net_device *dev); @@ -341,7 +340,16 @@ static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) } while (read_seqretry(&hh->hh_lock, seq)); skb_push(skb, hh_len); - return hh->hh_output(skb); + return dev_queue_xmit(skb); +} + +static inline int neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + struct hh_cache *hh = &n->hh; + if ((n->nud_state & NUD_CONNECTED) && hh->hh_len) + return neigh_hh_output(hh, skb); + else + return n->output(n, skb); } static inline struct neighbour * diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index aef430d779b..1ab1aec209a 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -65,6 +65,7 @@ struct net { struct list_head dev_base_head; struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; + unsigned int dev_base_seq; /* protected by rtnl_mutex */ /* core fib_rules */ struct list_head rules_ops; diff --git a/include/net/netlink.h b/include/net/netlink.h index 02740a94f10..98c185441be 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -638,6 +638,30 @@ static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid) nlmsg_ok(pos, rem); \ pos = nlmsg_next(pos, &(rem))) +/** + * nl_dump_check_consistent - check if sequence is consistent and advertise if not + * @cb: netlink callback structure that stores the sequence number + * @nlh: netlink message header to write the flag to + * + * This function checks if the sequence (generation) number changed during dump + * and if it did, advertises it in the netlink message header. + * + * The correct way to use it is to set cb->seq to the generation counter when + * all locks for dumping have been acquired, and then call this function for + * each message that is generated. + * + * Note that due to initialisation concerns, 0 is an invalid sequence number + * and must not be used by code that uses this functionality. + */ +static inline void +nl_dump_check_consistent(struct netlink_callback *cb, + struct nlmsghdr *nlh) +{ + if (cb->prev_seq && cb->seq != cb->prev_seq) + nlh->nlmsg_flags |= NLM_F_DUMP_INTR; + cb->prev_seq = cb->seq; +} + /************************************************************************** * Netlink Attributes **************************************************************************/ diff --git a/include/net/nfc.h b/include/net/nfc.h new file mode 100644 index 00000000000..cc0130312f7 --- /dev/null +++ b/include/net/nfc.h @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2011 Instituto Nokia de Tecnologia + * + * Authors: + * Lauro Ramos Venancio <lauro.venancio@openbossa.org> + * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __NET_NFC_H +#define __NET_NFC_H + +#include <linux/device.h> +#include <linux/skbuff.h> + +#define nfc_dev_info(dev, fmt, arg...) dev_info((dev), "NFC: " fmt "\n", ## arg) +#define nfc_dev_err(dev, fmt, arg...) dev_err((dev), "NFC: " fmt "\n", ## arg) +#define nfc_dev_dbg(dev, fmt, arg...) dev_dbg((dev), fmt "\n", ## arg) + +struct nfc_dev; + +/** + * data_exchange_cb_t - Definition of nfc_data_exchange callback + * + * @context: nfc_data_exchange cb_context parameter + * @skb: response data + * @err: If an error has occurred during data exchange, it is the + * error number. Zero means no error. + * + * When a rx or tx package is lost or corrupted or the target gets out + * of the operating field, err is -EIO. + */ +typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb, + int err); + +struct nfc_ops { + int (*start_poll)(struct nfc_dev *dev, u32 protocols); + void (*stop_poll)(struct nfc_dev *dev); + int (*activate_target)(struct nfc_dev *dev, u32 target_idx, + u32 protocol); + void (*deactivate_target)(struct nfc_dev *dev, u32 target_idx); + int (*data_exchange)(struct nfc_dev *dev, u32 target_idx, + struct sk_buff *skb, data_exchange_cb_t cb, + void *cb_context); +}; + +struct nfc_target { + u32 idx; + u32 supported_protocols; + u16 sens_res; + u8 sel_res; +}; + +struct nfc_genl_data { + u32 poll_req_pid; + struct mutex genl_data_mutex; +}; + +struct nfc_dev { + unsigned idx; + unsigned target_idx; + struct nfc_target *targets; + int n_targets; + int targets_generation; + spinlock_t targets_lock; + struct device dev; + bool polling; + struct nfc_genl_data genl_data; + u32 supported_protocols; + + struct nfc_ops *ops; +}; +#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) + +extern struct class nfc_class; + +struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, + u32 supported_protocols); + +/** + * nfc_free_device - free nfc device + * + * @dev: The nfc device to free + */ +static inline void nfc_free_device(struct nfc_dev *dev) +{ + put_device(&dev->dev); +} + +int nfc_register_device(struct nfc_dev *dev); + +void nfc_unregister_device(struct nfc_dev *dev); + +/** + * nfc_set_parent_dev - set the parent device + * + * @nfc_dev: The nfc device whose parent is being set + * @dev: The parent device + */ +static inline void nfc_set_parent_dev(struct nfc_dev *nfc_dev, + struct device *dev) +{ + nfc_dev->dev.parent = dev; +} + +/** + * nfc_set_drvdata - set driver specifc data + * + * @dev: The nfc device + * @data: Pointer to driver specifc data + */ +static inline void nfc_set_drvdata(struct nfc_dev *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +/** + * nfc_get_drvdata - get driver specifc data + * + * @dev: The nfc device + */ +static inline void *nfc_get_drvdata(struct nfc_dev *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +/** + * nfc_device_name - get the nfc device name + * + * @dev: The nfc device whose name to return + */ +static inline const char *nfc_device_name(struct nfc_dev *dev) +{ + return dev_name(&dev->dev); +} + +struct sk_buff 
*nfc_alloc_skb(unsigned int size, gfp_t gfp); + +int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, + int ntargets); + +#endif /* __NET_NFC_H */ diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 65afc496620..fffdc603f4c 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -99,9 +99,9 @@ static inline void qdisc_run(struct Qdisc *q) __qdisc_run(q); } -extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, +extern int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res); -extern int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, +extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res); /* Calculate maximal size of packet seen by hard_start_xmit diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 4093ca78cf6..678f1ffaf84 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -6,11 +6,14 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); +typedef u16 (*rtnl_calcit_func)(struct sk_buff *); extern int __rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func); + rtnl_doit_func, rtnl_dumpit_func, + rtnl_calcit_func); extern void rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func); + rtnl_doit_func, rtnl_dumpit_func, + rtnl_calcit_func); extern int rtnl_unregister(int protocol, int msgtype); extern void rtnl_unregister_all(int protocol); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index b931f021d7a..4fc88f3ccd5 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -181,8 +181,9 @@ struct tcf_proto_ops { struct tcf_proto_ops *next; char kind[IFNAMSIZ]; - int (*classify)(struct sk_buff*, struct tcf_proto*, - struct tcf_result *); + int (*classify)(struct sk_buff *, + const struct tcf_proto *, + struct tcf_result *); int (*init)(struct tcf_proto*); void (*destroy)(struct tcf_proto*); @@ -205,8 +206,9 @@ struct tcf_proto { /* Fast access part */ struct tcf_proto *next; void *root; - int (*classify)(struct sk_buff*, struct tcf_proto*, - struct tcf_result *); + int (*classify)(struct sk_buff *, + const struct tcf_proto *, + struct tcf_result *); __be16 protocol; /* All the rest */ @@ -214,7 +216,7 @@ struct tcf_proto { u32 classid; struct Qdisc *q; void *data; - struct tcf_proto_ops *ops; + const struct tcf_proto_ops *ops; }; struct qdisc_skb_cb { diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index dd6847e5d6e..6506458ccd3 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -63,6 +63,7 @@ typedef enum { SCTP_CMD_ECN_ECNE, /* Do delayed ECNE processing. */ SCTP_CMD_ECN_CWR, /* Do delayed CWR processing. */ SCTP_CMD_TIMER_START, /* Start a timer. */ + SCTP_CMD_TIMER_START_ONCE, /* Start a timer once */ SCTP_CMD_TIMER_RESTART, /* Restart a timer. */ SCTP_CMD_TIMER_STOP, /* Stop a timer. */ SCTP_CMD_INIT_CHOOSE_TRANSPORT, /* Choose transport for an INIT. 
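/*
 * Illustrative sketch, not part of the patch: a minimal driver-side use of
 * the new <net/nfc.h> core API declared above.  The my_* callbacks, my_priv
 * and MY_PROTOCOLS are hypothetical placeholders.
 */
static struct nfc_ops my_nfc_ops = {
	.start_poll		= my_start_poll,
	.stop_poll		= my_stop_poll,
	.activate_target	= my_activate_target,
	.deactivate_target	= my_deactivate_target,
	.data_exchange		= my_data_exchange,
};

static int my_nfc_probe(struct platform_device *pdev)
{
	struct nfc_dev *ndev;
	int rc;

	ndev = nfc_allocate_device(&my_nfc_ops, MY_PROTOCOLS);
	if (!ndev)
		return -ENOMEM;

	nfc_set_parent_dev(ndev, &pdev->dev);
	nfc_set_drvdata(ndev, my_priv);

	rc = nfc_register_device(ndev);
	if (rc)
		nfc_free_device(ndev);	/* drops the device reference */
	return rc;
}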
*/ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index b2c2366676a..6a72a58cde5 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -120,6 +120,7 @@ extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, int flags); extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); extern int sctp_register_pf(struct sctp_pf *, sa_family_t); +extern void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *, int); /* * sctp/socket.c @@ -134,6 +135,7 @@ void sctp_sock_rfree(struct sk_buff *skb); void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc); extern struct percpu_counter sctp_sockets_allocated; +extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); /* * sctp/primitive.c @@ -285,20 +287,21 @@ do { \ pr_cont(fmt, ##args); \ } while (0) #define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail, \ - args_lead, saddr, args_trail...) \ + args_lead, addr, args_trail...) \ do { \ + const union sctp_addr *_addr = (addr); \ if (sctp_debug_flag) { \ - if (saddr->sa.sa_family == AF_INET6) { \ + if (_addr->sa.sa_family == AF_INET6) { \ printk(KERN_DEBUG \ pr_fmt(fmt_lead "%pI6" fmt_trail), \ args_lead, \ - &saddr->v6.sin6_addr, \ + &_addr->v6.sin6_addr, \ args_trail); \ } else { \ printk(KERN_DEBUG \ pr_fmt(fmt_lead "%pI4" fmt_trail), \ args_lead, \ - &saddr->v4.sin_addr.s_addr, \ + &_addr->v4.sin_addr.s_addr, \ args_trail); \ } \ } \ @@ -598,7 +601,7 @@ static inline int ipver2af(__u8 ipver) return AF_INET6; default: return 0; - }; + } } /* Convert from an address parameter type to an address family. */ @@ -611,7 +614,7 @@ static inline int param_type2af(__be16 type) return AF_INET6; default: return 0; - }; + } } /* Perform some sanity checks. */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 7df327a6d56..31d7ea2e1d2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -205,6 +205,11 @@ extern struct sctp_globals { * It is a list of sctp_sockaddr_entry. */ struct list_head local_addr_list; + int default_auto_asconf; + struct list_head addr_waitq; + struct timer_list addr_wq_timer; + struct list_head auto_asconf_splist; + spinlock_t addr_wq_lock; /* Lock that protects the local_addr_list writers */ spinlock_t addr_list_lock; @@ -264,6 +269,11 @@ extern struct sctp_globals { #define sctp_port_hashtable (sctp_globals.port_hashtable) #define sctp_local_addr_list (sctp_globals.local_addr_list) #define sctp_local_addr_lock (sctp_globals.addr_list_lock) +#define sctp_auto_asconf_splist (sctp_globals.auto_asconf_splist) +#define sctp_addr_waitq (sctp_globals.addr_waitq) +#define sctp_addr_wq_timer (sctp_globals.addr_wq_timer) +#define sctp_addr_wq_lock (sctp_globals.addr_wq_lock) +#define sctp_default_auto_asconf (sctp_globals.default_auto_asconf) #define sctp_scope_policy (sctp_globals.ipv4_scope_policy) #define sctp_addip_enable (sctp_globals.addip_enable) #define sctp_addip_noauth (sctp_globals.addip_noauth_enable) @@ -341,6 +351,8 @@ struct sctp_sock { atomic_t pd_mode; /* Receive to here while partial delivery is in effect. 
*/ struct sk_buff_head pd_lobby; + struct list_head auto_asconf_list; + int do_auto_asconf; }; static inline struct sctp_sock *sctp_sk(const struct sock *sk) @@ -792,6 +804,8 @@ struct sctp_sockaddr_entry { __u8 valid; }; +#define SCTP_ADDRESS_TICK_DELAY 500 + typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); /* This structure holds lists of chunks as we are assembling for @@ -1236,6 +1250,7 @@ sctp_scope_t sctp_scope(const union sctp_addr *); int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); int sctp_is_any(struct sock *sk, const union sctp_addr *addr); int sctp_addr_is_valid(const union sctp_addr *addr); +int sctp_is_ep_boundall(struct sock *sk); /* What type of endpoint? */ @@ -1898,6 +1913,8 @@ struct sctp_association { * after reaching 4294967295. */ __u32 addip_serial; + union sctp_addr *asconf_addr_del_pending; + int src_out_of_asoc_ok; /* SCTP AUTH: list of the endpoint shared keys. These * keys are provided out of band by the user applicaton diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 99b027b2adc..ca4693b4e09 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -80,7 +80,7 @@ static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb) void sctp_ulpevent_free(struct sctp_ulpevent *); int sctp_ulpevent_is_notification(const struct sctp_ulpevent *); -void sctp_queue_purge_ulpevents(struct sk_buff_head *list); +unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list); struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( const struct sctp_association *asoc, diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index 32fd5127403..0842ef00b2f 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -92,6 +92,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_LOCAL_AUTH_CHUNKS 27 /* Read only */ #define SCTP_GET_ASSOC_NUMBER 28 /* Read only */ #define SCTP_GET_ASSOC_ID_LIST 29 /* Read only */ +#define SCTP_AUTO_ASCONF 30 /* Internal Socket Options. Some of the sctp library functions are * implemented using these socket options. diff --git a/include/net/snmp.h b/include/net/snmp.h index 479083a78b0..8f0f9ac0307 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -116,57 +116,51 @@ struct linux_xfrm_mib { unsigned long mibs[LINUX_MIB_XFRMMAX]; }; -/* - * FIXME: On x86 and some other CPUs the split into user and softirq parts - * is not needed because addl $1,memory is atomic against interrupts (but - * atomic_inc would be overkill because of the lock cycles). 
Wants new - * nonlocked_atomic_inc() primitives -AK - */ +#define SNMP_ARRAY_SZ 1 + #define DEFINE_SNMP_STAT(type, name) \ - __typeof__(type) __percpu *name[2] + __typeof__(type) __percpu *name[SNMP_ARRAY_SZ] #define DEFINE_SNMP_STAT_ATOMIC(type, name) \ __typeof__(type) *name #define DECLARE_SNMP_STAT(type, name) \ - extern __typeof__(type) __percpu *name[2] - -#define SNMP_STAT_BHPTR(name) (name[0]) -#define SNMP_STAT_USRPTR(name) (name[1]) + extern __typeof__(type) __percpu *name[SNMP_ARRAY_SZ] #define SNMP_INC_STATS_BH(mib, field) \ __this_cpu_inc(mib[0]->mibs[field]) + #define SNMP_INC_STATS_USER(mib, field) \ - this_cpu_inc(mib[1]->mibs[field]) + irqsafe_cpu_inc(mib[0]->mibs[field]) + #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \ atomic_long_inc(&mib->mibs[field]) + #define SNMP_INC_STATS(mib, field) \ - this_cpu_inc(mib[!in_softirq()]->mibs[field]) + irqsafe_cpu_inc(mib[0]->mibs[field]) + #define SNMP_DEC_STATS(mib, field) \ - this_cpu_dec(mib[!in_softirq()]->mibs[field]) + irqsafe_cpu_dec(mib[0]->mibs[field]) + #define SNMP_ADD_STATS_BH(mib, field, addend) \ __this_cpu_add(mib[0]->mibs[field], addend) + #define SNMP_ADD_STATS_USER(mib, field, addend) \ - this_cpu_add(mib[1]->mibs[field], addend) + irqsafe_cpu_add(mib[0]->mibs[field], addend) + #define SNMP_ADD_STATS(mib, field, addend) \ - this_cpu_add(mib[!in_softirq()]->mibs[field], addend) + irqsafe_cpu_add(mib[0]->mibs[field], addend) /* * Use "__typeof__(*mib[0]) *ptr" instead of "__typeof__(mib[0]) ptr" * to make @ptr a non-percpu pointer. */ #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ - __typeof__(*mib[0]) *ptr; \ - preempt_disable(); \ - ptr = this_cpu_ptr((mib)[!in_softirq()]); \ - ptr->mibs[basefield##PKTS]++; \ - ptr->mibs[basefield##OCTETS] += addend;\ - preempt_enable(); \ + irqsafe_cpu_inc(mib[0]->mibs[basefield##PKTS]); \ + irqsafe_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \ } while (0) #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ do { \ - __typeof__(*mib[0]) *ptr = \ - __this_cpu_ptr((mib)[0]); \ - ptr->mibs[basefield##PKTS]++; \ - ptr->mibs[basefield##OCTETS] += addend;\ + __this_cpu_inc(mib[0]->mibs[basefield##PKTS]); \ + __this_cpu_add(mib[0]->mibs[basefield##OCTETS], addend); \ } while (0) @@ -179,40 +173,20 @@ struct linux_xfrm_mib { ptr->mibs[field] += addend; \ u64_stats_update_end(&ptr->syncp); \ } while (0) + #define SNMP_ADD_STATS64_USER(mib, field, addend) \ do { \ - __typeof__(*mib[0]) *ptr; \ - preempt_disable(); \ - ptr = __this_cpu_ptr((mib)[1]); \ - u64_stats_update_begin(&ptr->syncp); \ - ptr->mibs[field] += addend; \ - u64_stats_update_end(&ptr->syncp); \ - preempt_enable(); \ + local_bh_disable(); \ + SNMP_ADD_STATS64_BH(mib, field, addend); \ + local_bh_enable(); \ } while (0) + #define SNMP_ADD_STATS64(mib, field, addend) \ - do { \ - __typeof__(*mib[0]) *ptr; \ - preempt_disable(); \ - ptr = __this_cpu_ptr((mib)[!in_softirq()]); \ - u64_stats_update_begin(&ptr->syncp); \ - ptr->mibs[field] += addend; \ - u64_stats_update_end(&ptr->syncp); \ - preempt_enable(); \ - } while (0) + SNMP_ADD_STATS64_USER(mib, field, addend) + #define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1) #define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1) #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) -#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ - do { \ - __typeof__(*mib[0]) *ptr; \ - preempt_disable(); \ - ptr = __this_cpu_ptr((mib)[!in_softirq()]); \ - u64_stats_update_begin(&ptr->syncp); \ - 
ptr->mibs[basefield##PKTS]++; \ - ptr->mibs[basefield##OCTETS] += addend; \ - u64_stats_update_end(&ptr->syncp); \ - preempt_enable(); \ - } while (0) #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \ do { \ __typeof__(*mib[0]) *ptr; \ @@ -222,6 +196,12 @@ struct linux_xfrm_mib { ptr->mibs[basefield##OCTETS] += addend; \ u64_stats_update_end(&ptr->syncp); \ } while (0) +#define SNMP_UPD_PO_STATS64(mib, basefield, addend) \ + do { \ + local_bh_disable(); \ + SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \ + local_bh_enable(); \ + } while (0) #else #define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field) #define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field) diff --git a/include/net/sock.h b/include/net/sock.h index c0b938cb4b1..396f735e0cd 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -40,6 +40,7 @@ #ifndef _SOCK_H #define _SOCK_H +#include <linux/hardirq.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/list_nulls.h> @@ -562,6 +563,7 @@ enum sock_flags { SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */ SOCK_FASYNC, /* fasync() active */ SOCK_RXQ_OVFL, + SOCK_ZEROCOPY, /* buffers from userspace */ }; static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) diff --git a/include/net/tcp.h b/include/net/tcp.h index cda30ea354a..149a415d1e0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -122,7 +122,13 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #endif #define TCP_RTO_MAX ((unsigned)(120*HZ)) #define TCP_RTO_MIN ((unsigned)(HZ/5)) -#define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */ +#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC2988bis initial RTO value */ +#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now + * used as a fallback RTO for the + * initial data transmission if no + * valid RTT sample has been acquired, + * most likely due to retrans in 3WHS. + */ #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes * for local resources. @@ -295,7 +301,7 @@ static inline void tcp_synq_overflow(struct sock *sk) static inline int tcp_synq_no_recent_overflow(const struct sock *sk) { unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; - return time_after(jiffies, last_overflow + TCP_TIMEOUT_INIT); + return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK); } extern struct proto tcp_prot; @@ -508,6 +514,7 @@ extern void tcp_initialize_rcv_mss(struct sock *sk); extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); extern int tcp_mss_to_mtu(struct sock *sk, int mss); extern void tcp_mtup_init(struct sock *sk); +extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); static inline void tcp_bound_rto(const struct sock *sk) { diff --git a/include/net/wimax.h b/include/net/wimax.h index 7328d5019d8..322ff4fbdb4 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -423,7 +423,6 @@ struct wimax_dev { int (*op_reset)(struct wimax_dev *wimax_dev); struct rfkill *rfkill; - struct input_dev *rfkill_input; unsigned rf_hw; unsigned rf_sw; char name[32]; diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h new file mode 100644 index 00000000000..a5889f18807 --- /dev/null +++ b/include/rdma/ib_pma.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. + * All rights reserved. + * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
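/*
 * Illustrative sketch, not part of the patch: callers of the SNMP macros are
 * unchanged by the snmp.h rework above -- only the expansion differs.  With
 * SNMP_ARRAY_SZ == 1 there is a single per-cpu copy, and the non-BH variants
 * now use irqsafe_cpu_inc() instead of indexing a second array with
 * !in_softirq().  The MIB/field names below follow the ipv4 netns MIBs and
 * are shown only as an example.
 */
static inline void my_count_in_deliver(struct net *net)
{
	/* safe from any context: irqsafe increment of the single copy */
	SNMP_INC_STATS(net->mib.ip_statistics, IPSTATS_MIB_INDELIVERS);
}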
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#if !defined(IB_PMA_H) +#define IB_PMA_H + +#include <rdma/ib_mad.h> + +/* + * PMA class portinfo capability mask bits + */ +#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8) +#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9) +#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12) + +#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) +#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010) +#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011) +#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012) +#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D) +#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E) + +struct ib_pma_mad { + struct ib_mad_hdr mad_hdr; + u8 reserved[40]; + u8 data[192]; +} __packed; + +struct ib_pma_portsamplescontrol { + u8 opcode; + u8 port_select; + u8 tick; + u8 counter_width; /* resv: 7:3, counter width: 2:0 */ + __be32 counter_mask0_9; /* 2, 10 3-bit fields */ + __be16 counter_mask10_14; /* 1, 5 3-bit fields */ + u8 sample_mechanisms; + u8 sample_status; /* only lower 2 bits */ + __be64 option_mask; + __be64 vendor_mask; + __be32 sample_start; + __be32 sample_interval; + __be16 tag; + __be16 counter_select[15]; + __be32 reserved1; + __be64 samples_only_option_mask; + __be32 reserved2[28]; +}; + +struct ib_pma_portsamplesresult { + __be16 tag; + __be16 sample_status; /* only lower 2 bits */ + __be32 counter[15]; +}; + +struct ib_pma_portsamplesresult_ext { + __be16 tag; + __be16 sample_status; /* only lower 2 bits */ + __be32 extended_width; /* only upper 2 bits */ + __be64 counter[15]; +}; + +struct ib_pma_portcounters { + u8 reserved; + u8 port_select; + __be16 counter_select; + __be16 symbol_error_counter; + u8 link_error_recovery_counter; + u8 link_downed_counter; + __be16 port_rcv_errors; + __be16 port_rcv_remphys_errors; + __be16 port_rcv_switch_relay_errors; + __be16 port_xmit_discards; + u8 port_xmit_constraint_errors; + u8 port_rcv_constraint_errors; + u8 reserved1; + u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */ + __be16 reserved2; + __be16 vl15_dropped; + __be32 port_xmit_data; + __be32 port_rcv_data; + __be32 port_xmit_packets; + __be32 port_rcv_packets; + __be32 port_xmit_wait; +} __packed; + + +#define 
IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001) +#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002) +#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004) +#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008) +#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010) +#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040) +#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200) +#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400) +#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800) +#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000) +#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000) +#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000) +#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000) + +struct ib_pma_portcounters_ext { + u8 reserved; + u8 port_select; + __be16 counter_select; + __be32 reserved1; + __be64 port_xmit_data; + __be64 port_rcv_data; + __be64 port_xmit_packets; + __be64 port_rcv_packets; + __be64 port_unicast_xmit_packets; + __be64 port_unicast_rcv_packets; + __be64 port_multicast_xmit_packets; + __be64 port_multicast_rcv_packets; +} __packed; + +#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001) +#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002) +#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004) +#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008) +#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010) +#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020) +#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040) +#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080) + +#endif /* IB_PMA_H */ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 55cd0a0bc97..bf4306aea16 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -350,7 +350,8 @@ enum ib_event_type { IB_EVENT_SRQ_ERR, IB_EVENT_SRQ_LIMIT_REACHED, IB_EVENT_QP_LAST_WQE_REACHED, - IB_EVENT_CLIENT_REREGISTER + IB_EVENT_CLIENT_REREGISTER, + IB_EVENT_GID_CHANGE, }; struct ib_event { diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h index dd0a52cea95..ea68b3c56db 100644 --- a/include/scsi/iscsi_proto.h +++ b/include/scsi/iscsi_proto.h @@ -60,7 +60,7 @@ struct iscsi_hdr { uint8_t rsvd2[2]; uint8_t hlength; /* AHSs total length */ uint8_t dlength[3]; /* Data length */ - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; /* Initiator Task Tag, opaque for target */ __be32 ttt; /* Target Task Tag */ __be32 statsn; @@ -122,7 +122,7 @@ struct iscsi_cmd { __be16 rsvd2; uint8_t hlength; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; /* Initiator Task Tag */ __be32 data_length; __be32 cmdsn; @@ -198,7 +198,7 @@ struct iscsi_async { uint8_t rsvd2[2]; uint8_t rsvd3; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; uint8_t rsvd4[8]; __be32 statsn; __be32 exp_cmdsn; @@ -226,7 +226,7 @@ struct iscsi_nopout { __be16 rsvd2; uint8_t rsvd3; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; /* Initiator Task Tag */ __be32 ttt; /* Target Transfer Tag */ __be32 cmdsn; @@ -241,7 +241,7 @@ struct iscsi_nopin { __be16 rsvd2; uint8_t rsvd3; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; /* Initiator Task Tag */ __be32 ttt; /* Target Transfer Tag */ __be32 statsn; @@ -257,7 +257,7 @@ struct iscsi_tm { uint8_t rsvd1[2]; uint8_t hlength; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; /* Initiator Task Tag */ itt_t rtt; /* Reference Task Tag */ __be32 cmdsn; @@ -315,7 +315,7 @@ struct 
iscsi_r2t_rsp { uint8_t rsvd2[2]; uint8_t hlength; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; /* Initiator Task Tag */ __be32 ttt; /* Target Transfer Tag */ __be32 statsn; @@ -333,7 +333,7 @@ struct iscsi_data { uint8_t rsvd2[2]; uint8_t rsvd3; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; __be32 ttt; __be32 rsvd4; @@ -353,7 +353,7 @@ struct iscsi_data_rsp { uint8_t cmd_status; uint8_t hlength; uint8_t dlength[3]; - uint8_t lun[8]; + struct scsi_lun lun; itt_t itt; __be32 ttt; __be32 statsn; diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index a3cbda4ddb5..7d96829b0c0 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -511,6 +511,14 @@ struct libfc_function_template { */ int (*ddp_done)(struct fc_lport *, u16); /* + * Sets up the DDP context for a given exchange id on the given + * scatterlist if LLD supports DDP for FCoE target. + * + * STATUS: OPTIONAL + */ + int (*ddp_target)(struct fc_lport *, u16, struct scatterlist *, + unsigned int); + /* * Allow LLD to fill its own Link Error Status Block * * STATUS: OPTIONAL diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 0f4367751b7..cedcff371c8 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -115,7 +115,7 @@ struct iscsi_task { /* copied values in case we need to send tmfs */ itt_t hdr_itt; __be32 cmdsn; - uint8_t lun[8]; + struct scsi_lun lun; int itt; /* this ITT */ diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 2480e7d10dc..6b14359d9fe 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -27,6 +27,7 @@ #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/mutex.h> +#include <linux/workqueue.h> #if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE) #include "seq_device.h" @@ -63,6 +64,7 @@ struct snd_rawmidi_global_ops { }; struct snd_rawmidi_runtime { + struct snd_rawmidi_substream *substream; unsigned int drain: 1, /* drain stage */ oss: 1; /* OSS compatible mode */ /* midi stream buffer */ @@ -79,7 +81,7 @@ struct snd_rawmidi_runtime { /* event handler (new bytes, input only) */ void (*event)(struct snd_rawmidi_substream *substream); /* defers calls to event [input] or ops->trigger [output] */ - struct tasklet_struct tasklet; + struct work_struct event_work; /* private data */ void *private_data; void (*private_free)(struct snd_rawmidi_substream *substream); diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 1bafe95dcf4..5ad5f3a50c6 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -209,6 +209,10 @@ struct snd_soc_dai_driver { struct snd_soc_pcm_stream capture; struct snd_soc_pcm_stream playback; unsigned int symmetric_rates:1; + + /* probe ordering - for components with runtime dependencies */ + int probe_order; + int remove_order; }; /* diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index c46e7d89561..e09505c5a49 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -348,6 +348,8 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm); void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm); int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm, const struct snd_soc_dapm_route *route, int num); +int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, + const struct snd_soc_dapm_route *route, int num); /* dapm events */ int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, @@ -429,6 +431,7 @@ struct snd_soc_dapm_path { /* status */ u32 
connect:1; /* source and sink widgets are connected */ u32 walked:1; /* path has been walked */ + u32 weak:1; /* path ignored for power management */ int (*connected)(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink); @@ -444,6 +447,7 @@ struct snd_soc_dapm_widget { char *name; /* widget name */ char *sname; /* stream name */ struct snd_soc_codec *codec; + struct snd_soc_platform *platform; struct list_head list; struct snd_soc_dapm_context *dapm; @@ -507,10 +511,11 @@ struct snd_soc_dapm_context { struct device *dev; /* from parent - for debug */ struct snd_soc_codec *codec; /* parent codec */ + struct snd_soc_platform *platform; /* parent platform */ struct snd_soc_card *card; /* parent card */ /* used during DAPM updates */ - int dev_power; + enum snd_soc_bias_level target_bias_level; struct list_head list; #ifdef CONFIG_DEBUG_FS diff --git a/include/sound/soc.h b/include/sound/soc.h index 3a4bd3a3c68..aa19f5a32ba 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -203,6 +203,16 @@ SOC_VALUE_ENUM_DOUBLE_DECL(name, xreg, xshift, xshift, xmask, xtexts, xvalues) /* + * Component probe and remove ordering levels for components with runtime + * dependencies. + */ +#define SND_SOC_COMP_ORDER_FIRST -2 +#define SND_SOC_COMP_ORDER_EARLY -1 +#define SND_SOC_COMP_ORDER_NORMAL 0 +#define SND_SOC_COMP_ORDER_LATE 1 +#define SND_SOC_COMP_ORDER_LAST 2 + +/* * Bias levels * * @ON: Bias is fully on for audio playback and capture operations. @@ -214,10 +224,10 @@ * @OFF: Power Off. No restrictions on transition times. */ enum snd_soc_bias_level { - SND_SOC_BIAS_OFF, - SND_SOC_BIAS_STANDBY, - SND_SOC_BIAS_PREPARE, - SND_SOC_BIAS_ON, + SND_SOC_BIAS_OFF = 0, + SND_SOC_BIAS_STANDBY = 1, + SND_SOC_BIAS_PREPARE = 2, + SND_SOC_BIAS_ON = 3, }; struct snd_jack; @@ -258,6 +268,11 @@ enum snd_soc_compress_type { SND_SOC_RBTREE_COMPRESSION }; +enum snd_soc_pcm_subclass { + SND_SOC_PCM_CLASS_PCM = 0, + SND_SOC_PCM_CLASS_BE = 1, +}; + int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id, unsigned int freq, int dir); int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source, @@ -297,6 +312,10 @@ int snd_soc_default_readable_register(struct snd_soc_codec *codec, unsigned int reg); int snd_soc_default_writable_register(struct snd_soc_codec *codec, unsigned int reg); +int snd_soc_platform_read(struct snd_soc_platform *platform, + unsigned int reg); +int snd_soc_platform_write(struct snd_soc_platform *platform, + unsigned int reg, unsigned int val); /* Utility functions to get clock rates from various things */ int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots); @@ -349,6 +368,8 @@ struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template, const char *prefix); int snd_soc_add_controls(struct snd_soc_codec *codec, const struct snd_kcontrol_new *controls, int num_controls); +int snd_soc_add_platform_controls(struct snd_soc_platform *platform, + const struct snd_kcontrol_new *controls, int num_controls); int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol, @@ -612,6 +633,10 @@ struct snd_soc_codec_driver { void (*seq_notifier)(struct snd_soc_dapm_context *, enum snd_soc_dapm_type, int); + + /* probe ordering - for components with runtime dependencies */ + int probe_order; + int remove_order; }; /* SoC platform interface */ @@ -623,10 +648,17 @@ struct snd_soc_platform_driver { int (*resume)(struct snd_soc_dai *dai); /* pcm 
creation and destruction */ - int (*pcm_new)(struct snd_card *, struct snd_soc_dai *, - struct snd_pcm *); + int (*pcm_new)(struct snd_soc_pcm_runtime *); void (*pcm_free)(struct snd_pcm *); + /* Default control and setup, added after probe() is run */ + const struct snd_kcontrol_new *controls; + int num_controls; + const struct snd_soc_dapm_widget *dapm_widgets; + int num_dapm_widgets; + const struct snd_soc_dapm_route *dapm_routes; + int num_dapm_routes; + /* * For platform caused delay reporting. * Optional. @@ -636,6 +668,14 @@ struct snd_soc_platform_driver { /* platform stream ops */ struct snd_pcm_ops *ops; + + /* probe ordering - for components with runtime dependencies */ + int probe_order; + int remove_order; + + /* platform IO - used for platform DAPM */ + unsigned int (*read)(struct snd_soc_platform *, unsigned int); + int (*write)(struct snd_soc_platform *, unsigned int, unsigned int); }; struct snd_soc_platform { @@ -650,6 +690,8 @@ struct snd_soc_platform { struct snd_soc_card *card; struct list_head list; struct list_head card_list; + + struct snd_soc_dapm_context dapm; }; struct snd_soc_dai_link { @@ -725,8 +767,10 @@ struct snd_soc_card { /* callbacks */ int (*set_bias_level)(struct snd_soc_card *, + struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level); int (*set_bias_level_post)(struct snd_soc_card *, + struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level); long pmdown_time; @@ -789,6 +833,9 @@ struct snd_soc_pcm_runtime { struct device dev; struct snd_soc_card *card; struct snd_soc_dai_link *dai_link; + struct mutex pcm_mutex; + enum snd_soc_pcm_subclass pcm_subclass; + struct snd_pcm_ops ops; unsigned int complete:1; unsigned int dev_registered:1; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 561ac99def5..27040653005 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -9,12 +9,13 @@ #include <net/sock.h> #include <net/tcp.h> -#define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml" +#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) /* Used by transport_generic_allocate_iovecs() */ #define TRANSPORT_IOV_DATA_BUFFER 5 /* Maximum Number of LUNs per Target Portal Group */ +/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ #define TRANSPORT_MAX_LUNS_PER_TPG 256 /* * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. 
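/*
 * Illustrative sketch, not part of the patch: a platform driver filling in
 * the new snd_soc_platform_driver fields added above (probe ordering,
 * default controls/widgets registered after probe(), the pcm_new() callback
 * that now takes the pcm runtime, and platform register I/O for DAPM).
 * All my_platform_* symbols are hypothetical.
 */
static struct snd_soc_platform_driver my_platform_driver = {
	.ops			= &my_platform_pcm_ops,
	.pcm_new		= my_platform_pcm_new,
	.pcm_free		= my_platform_pcm_free,

	/* registered automatically once probe() has run */
	.controls		= my_platform_controls,
	.num_controls		= ARRAY_SIZE(my_platform_controls),
	.dapm_widgets		= my_platform_widgets,
	.num_dapm_widgets	= ARRAY_SIZE(my_platform_widgets),
	.dapm_routes		= my_platform_routes,
	.num_dapm_routes	= ARRAY_SIZE(my_platform_routes),

	/* probe late: this DSP depends on the codec being up first */
	.probe_order		= SND_SOC_COMP_ORDER_LATE,
	.remove_order		= SND_SOC_COMP_ORDER_EARLY,

	/* platform IO used by platform-level DAPM */
	.read			= my_platform_read,
	.write			= my_platform_write,
};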
@@ -99,6 +100,7 @@ enum transport_state_table { TRANSPORT_FREE = 15, TRANSPORT_NEW_CMD_MAP = 16, TRANSPORT_FREE_CMD_INTR = 17, + TRANSPORT_COMPLETE_QF_WP = 18, }; /* Used for struct se_cmd->se_cmd_flags */ @@ -108,27 +110,22 @@ enum se_cmd_flags_table { SCF_EMULATED_TASK_SENSE = 0x00000004, SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, - SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, SCF_SCSI_NON_DATA_CDB = 0x00000040, SCF_SCSI_CDB_EXCEPTION = 0x00000080, SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, - SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200, SCF_SE_CMD_FAILED = 0x00000400, SCF_SE_LUN_CMD = 0x00000800, SCF_SE_ALLOW_EOO = 0x00001000, - SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000, SCF_SENT_CHECK_CONDITION = 0x00004000, SCF_OVERFLOW_BIT = 0x00008000, SCF_UNDERFLOW_BIT = 0x00010000, SCF_SENT_DELAYED_TAS = 0x00020000, SCF_ALUA_NON_OPTIMIZED = 0x00040000, SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, - SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, - SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, + SCF_UNUSED = 0x00100000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, - SCF_EMULATE_SYNC_CACHE = 0x00800000, SCF_EMULATE_CDB_ASYNC = 0x01000000, - SCF_EMULATE_SYNC_UNMAP = 0x02000000 + SCF_EMULATE_QUEUE_FULL = 0x02000000, }; /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ @@ -205,11 +202,6 @@ typedef enum { SCSI_INDEX_TYPE_MAX } scsi_index_t; -struct scsi_index_table { - spinlock_t lock; - u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; -} ____cacheline_aligned; - struct se_cmd; struct t10_alua { @@ -235,7 +227,7 @@ struct t10_alua_lu_gp { atomic_t lu_gp_ref_cnt; spinlock_t lu_gp_lock; struct config_group lu_gp_group; - struct list_head lu_gp_list; + struct list_head lu_gp_node; struct list_head lu_gp_mem_list; } ____cacheline_aligned; @@ -291,10 +283,10 @@ struct t10_vpd { } ____cacheline_aligned; struct t10_wwn { - unsigned char vendor[8]; - unsigned char model[16]; - unsigned char revision[4]; - unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; + char vendor[8]; + char model[16]; + char revision[4]; + char unit_serial[INQUIRY_VPD_SERIAL_LEN]; spinlock_t t10_vpd_lock; struct se_subsystem_dev *t10_sub_dev; struct config_group t10_wwn_group; @@ -366,13 +358,13 @@ struct t10_reservation_ops { int (*t10_pr_clear)(struct se_cmd *); }; -struct t10_reservation_template { +struct t10_reservation { /* Reservation effects all target ports */ int pr_all_tg_pt; /* Activate Persistence across Target Power Loss enabled * for SCSI device */ int pr_aptpl_active; - /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ + /* Used by struct t10_reservation->pr_aptpl_buf_len */ #define PR_APTPL_BUF_LEN 8192 u32 pr_aptpl_buf_len; u32 pr_generation; @@ -397,7 +389,7 @@ struct t10_reservation_template { struct se_queue_req { int state; - void *cmd; + struct se_cmd *cmd; struct list_head qr_list; } ____cacheline_aligned; @@ -408,64 +400,10 @@ struct se_queue_obj { wait_queue_head_t thread_wq; } ____cacheline_aligned; -/* - * Used one per struct se_cmd to hold all extra struct se_task - * metadata. 
This structure is setup and allocated in - * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() - */ -struct se_transport_task { - unsigned char *t_task_cdb; - unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; - unsigned long long t_task_lba; - int t_tasks_failed; - int t_tasks_fua; - bool t_tasks_bidi; - u32 t_task_cdbs; - u32 t_tasks_check; - u32 t_tasks_no; - u32 t_tasks_sectors; - u32 t_tasks_se_num; - u32 t_tasks_se_bidi_num; - u32 t_tasks_sg_chained_no; - atomic_t t_fe_count; - atomic_t t_se_count; - atomic_t t_task_cdbs_left; - atomic_t t_task_cdbs_ex_left; - atomic_t t_task_cdbs_timeout_left; - atomic_t t_task_cdbs_sent; - atomic_t t_transport_aborted; - atomic_t t_transport_active; - atomic_t t_transport_complete; - atomic_t t_transport_queue_active; - atomic_t t_transport_sent; - atomic_t t_transport_stop; - atomic_t t_transport_timeout; - atomic_t transport_dev_active; - atomic_t transport_lun_active; - atomic_t transport_lun_fe_stop; - atomic_t transport_lun_stop; - spinlock_t t_state_lock; - struct completion t_transport_stop_comp; - struct completion transport_lun_fe_stop_comp; - struct completion transport_lun_stop_comp; - struct scatterlist *t_tasks_sg_chained; - struct scatterlist t_tasks_sg_bounce; - void *t_task_buf; - /* - * Used for pre-registered fabric SGL passthrough WRITE and READ - * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop - * and other HW target mode fabric modules. - */ - struct scatterlist *t_task_pt_sgl; - struct list_head *t_mem_list; - /* Used for BIDI READ */ - struct list_head *t_mem_bidi_list; - struct list_head t_task_list; -} ____cacheline_aligned; - struct se_task { unsigned char task_sense; struct scatterlist *task_sg; + u32 task_sg_nents; struct scatterlist *task_sg_bidi; u8 task_scsi_status; u8 task_flags; @@ -476,8 +414,6 @@ struct se_task { u32 task_no; u32 task_sectors; u32 task_size; - u32 task_sg_num; - u32 task_sg_offset; enum dma_data_direction task_data_direction; struct se_cmd *task_se_cmd; struct se_device *se_dev; @@ -495,9 +431,6 @@ struct se_task { struct list_head t_state_list; } ____cacheline_aligned; -#define TASK_CMD(task) ((task)->task_se_cmd) -#define TASK_DEV(task) ((task)->se_dev) - struct se_cmd { /* SAM response code being sent to initiator */ u8 scsi_status; @@ -531,9 +464,10 @@ struct se_cmd { atomic_t transport_sent; /* Used for sense data */ void *sense_buffer; - struct list_head se_delayed_list; - struct list_head se_ordered_list; - struct list_head se_lun_list; + struct list_head se_delayed_node; + struct list_head se_ordered_node; + struct list_head se_lun_node; + struct list_head se_qf_node; struct se_device *se_dev; struct se_dev_entry *se_deve; struct se_device *se_obj_ptr; @@ -542,18 +476,62 @@ struct se_cmd { /* Only used for internal passthrough and legacy TCM fabric modules */ struct se_session *se_sess; struct se_tmr_req *se_tmr_req; - /* t_task is setup to t_task_backstore in transport_init_se_cmd() */ - struct se_transport_task *t_task; - struct se_transport_task t_task_backstore; + struct list_head se_queue_node; struct target_core_fabric_ops *se_tfo; int (*transport_emulate_cdb)(struct se_cmd *); - void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); + void (*transport_split_cdb)(unsigned long long, u32, unsigned char *); void (*transport_wait_for_tasks)(struct se_cmd *, int, int); void (*transport_complete_callback)(struct se_cmd *); -} ____cacheline_aligned; + int (*transport_qf_callback)(struct se_cmd *); -#define T_TASK(cmd) ((cmd)->t_task) 
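/*
 * Illustrative sketch, not part of the patch: with struct se_transport_task
 * folded into struct se_cmd, the T_TASK()/CMD_TFO() style accessor macros
 * removed by this patch go away and callers reach the fields directly.
 * Field names below are taken from the reworked struct se_cmd.
 */
static inline unsigned char *my_cmd_cdb(struct se_cmd *cmd)
{
	/* was: return T_TASK(cmd)->t_task_cdb; */
	return cmd->t_task_cdb;
}

static inline struct target_core_fabric_ops *my_cmd_tfo(struct se_cmd *cmd)
{
	/* was: return CMD_TFO(cmd); */
	return cmd->se_tfo;
}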
-#define CMD_TFO(cmd) ((cmd)->se_tfo) + unsigned char *t_task_cdb; + unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; + unsigned long long t_task_lba; + int t_tasks_failed; + int t_tasks_fua; + bool t_tasks_bidi; + u32 t_tasks_sg_chained_no; + atomic_t t_fe_count; + atomic_t t_se_count; + atomic_t t_task_cdbs_left; + atomic_t t_task_cdbs_ex_left; + atomic_t t_task_cdbs_timeout_left; + atomic_t t_task_cdbs_sent; + atomic_t t_transport_aborted; + atomic_t t_transport_active; + atomic_t t_transport_complete; + atomic_t t_transport_queue_active; + atomic_t t_transport_sent; + atomic_t t_transport_stop; + atomic_t t_transport_timeout; + atomic_t transport_dev_active; + atomic_t transport_lun_active; + atomic_t transport_lun_fe_stop; + atomic_t transport_lun_stop; + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + struct completion transport_lun_fe_stop_comp; + struct completion transport_lun_stop_comp; + struct scatterlist *t_tasks_sg_chained; + + /* + * Used for pre-registered fabric SGL passthrough WRITE and READ + * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop + * and other HW target mode fabric modules. + */ + struct scatterlist *t_task_pt_sgl; + u32 t_task_pt_sgl_num; + + struct scatterlist *t_data_sg; + unsigned int t_data_nents; + struct scatterlist *t_bidi_data_sg; + unsigned int t_bidi_data_nents; + + /* Used for BIDI READ */ + struct list_head t_task_list; + u32 t_task_list_num; + +} ____cacheline_aligned; struct se_tmr_req { /* Task Management function to be preformed */ @@ -617,9 +595,6 @@ struct se_session { struct list_head sess_acl_list; } ____cacheline_aligned; -#define SE_SESS(cmd) ((cmd)->se_sess) -#define SE_NODE_ACL(sess) ((sess)->se_node_acl) - struct se_device; struct se_transform_info; struct scatterlist; @@ -640,8 +615,6 @@ struct se_lun_acl { struct se_ml_stat_grps ml_stat_grps; } ____cacheline_aligned; -#define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps) - struct se_dev_entry { bool def_pr_registered; /* See transport_lunflags_table */ @@ -688,6 +661,8 @@ struct se_dev_attrib { int emulate_reservations; int emulate_alua; int enforce_pr_isids; + int is_nonrot; + int emulate_rest_reord; u32 hw_block_size; u32 block_size; u32 hw_max_sectors; @@ -727,10 +702,10 @@ struct se_subsystem_dev { /* T10 Inquiry and VPD WWN Information */ struct t10_wwn t10_wwn; /* T10 SPC-2 + SPC-3 Reservations */ - struct t10_reservation_template t10_reservation; + struct t10_reservation t10_pr; spinlock_t se_dev_lock; void *se_dev_su_ptr; - struct list_head g_se_dev_list; + struct list_head se_dev_node; struct config_group se_dev_group; /* For T10 Reservations */ struct config_group se_dev_pr_group; @@ -738,11 +713,6 @@ struct se_subsystem_dev { struct se_dev_stat_grps dev_stat_grps; } ____cacheline_aligned; -#define T10_ALUA(su_dev) (&(su_dev)->t10_alua) -#define T10_RES(su_dev) (&(su_dev)->t10_reservation) -#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) -#define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps) - struct se_device { /* Set to 1 if thread is NOT sleeping on thread_sem */ u8 thread_active; @@ -780,11 +750,11 @@ struct se_device { atomic_t dev_status_thr_count; atomic_t dev_hoq_count; atomic_t dev_ordered_sync; + atomic_t dev_qf_count; struct se_obj dev_obj; struct se_obj dev_access_obj; struct se_obj dev_export_obj; - struct se_queue_obj *dev_queue_obj; - struct se_queue_obj *dev_status_queue_obj; + struct se_queue_obj dev_queue_obj; spinlock_t delayed_cmd_lock; spinlock_t ordered_cmd_lock; spinlock_t execute_task_lock; @@ -796,6 
+766,7 @@ struct se_device { spinlock_t dev_status_thr_lock; spinlock_t se_port_lock; spinlock_t se_tmr_lock; + spinlock_t qf_cmd_lock; /* Used for legacy SPC-2 reservationsa */ struct se_node_acl *dev_reserved_node_acl; /* Used for ALUA Logical Unit Group membership */ @@ -809,10 +780,12 @@ struct se_device { struct task_struct *process_thread; pid_t process_thread_pid; struct task_struct *dev_mgmt_thread; + struct work_struct qf_work_queue; struct list_head delayed_cmd_list; struct list_head ordered_cmd_list; struct list_head execute_task_list; struct list_head state_task_list; + struct list_head qf_cmd_list; /* Pointer to associated SE HBA */ struct se_hba *se_hba; struct se_subsystem_dev *se_sub_dev; @@ -824,11 +797,6 @@ struct se_device { struct list_head g_se_dev_list; } ____cacheline_aligned; -#define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev) -#define SU_DEV(dev) ((dev)->se_sub_dev) -#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) -#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) - struct se_hba { u16 hba_tpgt; u32 hba_id; @@ -837,24 +805,17 @@ struct se_hba { /* Virtual iSCSI devices attached. */ u32 dev_count; u32 hba_index; - atomic_t load_balance_queue; - atomic_t left_queue_depth; - /* Maximum queue depth the HBA can handle. */ - atomic_t max_queue_depth; /* Pointer to transport specific host structure. */ void *hba_ptr; /* Linked list for struct se_device */ struct list_head hba_dev_list; - struct list_head hba_list; + struct list_head hba_node; spinlock_t device_lock; - spinlock_t hba_queue_lock; struct config_group hba_group; struct mutex hba_access_mutex; struct se_subsystem_api *transport; } ____cacheline_aligned; -#define SE_HBA(dev) ((dev)->se_hba) - struct se_port_stat_grps { struct config_group stat_group; struct config_group scsi_port_group; @@ -881,9 +842,6 @@ struct se_lun { struct se_port_stat_grps port_stat_grps; } ____cacheline_aligned; -#define SE_LUN(cmd) ((cmd)->se_lun) -#define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps) - struct scsi_port_stats { u64 cmd_pdus; u64 tx_data_octets; @@ -930,7 +888,7 @@ struct se_portal_group { spinlock_t tpg_lun_lock; /* Pointer to $FABRIC_MOD portal group */ void *se_tpg_fabric_ptr; - struct list_head se_tpg_list; + struct list_head se_tpg_node; /* linked list for initiator ACL list */ struct list_head acl_node_list; struct se_lun *tpg_lun_list; @@ -949,8 +907,6 @@ struct se_portal_group { struct config_group tpg_param_group; } ____cacheline_aligned; -#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo) - struct se_wwn { struct target_fabric_configfs *wwn_tf; struct config_group wwn_group; @@ -958,28 +914,4 @@ struct se_wwn { struct config_group fabric_stat_group; } ____cacheline_aligned; -struct se_global { - u16 alua_lu_gps_counter; - int g_sub_api_initialized; - u32 in_shutdown; - u32 alua_lu_gps_count; - u32 g_hba_id_counter; - struct config_group target_core_hbagroup; - struct config_group alua_group; - struct config_group alua_lu_gps_group; - struct list_head g_lu_gps_list; - struct list_head g_se_tpg_list; - struct list_head g_hba_list; - struct list_head g_se_dev_list; - struct se_hba *g_lun0_hba; - struct se_subsystem_dev *g_lun0_su_dev; - struct se_device *g_lun0_dev; - struct t10_alua_lu_gp *default_lu_gp; - spinlock_t g_device_lock; - spinlock_t hba_lock; - spinlock_t se_tpg_lock; - spinlock_t lu_gps_lock; - spinlock_t plugin_class_lock; -} ____cacheline_aligned; - #endif /* TARGET_CORE_BASE_H */ diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h index 
52b18a5752c..46571912086 100644 --- a/include/target/target_core_device.h +++ b/include/target/target_core_device.h @@ -1,8 +1,8 @@ #ifndef TARGET_CORE_DEVICE_H #define TARGET_CORE_DEVICE_H -extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32); -extern int transport_get_lun_for_tmr(struct se_cmd *, u32); +extern int transport_lookup_cmd_lun(struct se_cmd *, u32); +extern int transport_lookup_tmr_lun(struct se_cmd *, u32); extern struct se_dev_entry *core_get_se_deve_from_rtpi( struct se_node_acl *, u16); extern int core_free_device_list_for_node(struct se_node_acl *, @@ -39,6 +39,8 @@ extern int se_dev_set_emulate_tas(struct se_device *, int); extern int se_dev_set_emulate_tpu(struct se_device *, int); extern int se_dev_set_emulate_tpws(struct se_device *, int); extern int se_dev_set_enforce_pr_isids(struct se_device *, int); +extern int se_dev_set_is_nonrot(struct se_device *, int); +extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int); extern int se_dev_set_queue_depth(struct se_device *, u32); extern int se_dev_set_max_sectors(struct se_device *, u32); extern int se_dev_set_optimal_sectors(struct se_device *, u32); diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index 747e1404dca..2de8fe90759 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h @@ -39,17 +39,11 @@ struct target_core_fabric_ops { */ int (*new_cmd_map)(struct se_cmd *); /* - * Optional function pointer for TCM fabric modules that use - * Linux/NET sockets to allocate struct iovec array to struct se_cmd - */ - int (*alloc_cmd_iovecs)(struct se_cmd *); - /* * Optional to release struct se_cmd and fabric dependent allocated * I/O descriptor in transport_cmd_check_stop() */ void (*check_stop_free)(struct se_cmd *); - void (*release_cmd_to_pool)(struct se_cmd *); - void (*release_cmd_direct)(struct se_cmd *); + void (*release_cmd)(struct se_cmd *); /* * Called with spin_lock_bh(struct se_portal_group->session_lock held. 
*/ @@ -70,7 +64,6 @@ struct target_core_fabric_ops { void (*set_default_node_attributes)(struct se_node_acl *); u32 (*get_task_tag)(struct se_cmd *); int (*get_cmd_state)(struct se_cmd *); - void (*new_cmd_failure)(struct se_cmd *); int (*queue_data_in)(struct se_cmd *); int (*queue_status)(struct se_cmd *); int (*queue_tm_rsp)(struct se_cmd *); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 24a1c6cb83c..46aae4f94ed 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -101,6 +101,10 @@ #define DA_ENFORCE_PR_ISIDS 1 #define DA_STATUS_MAX_SECTORS_MIN 16 #define DA_STATUS_MAX_SECTORS_MAX 8192 +/* By default don't report non-rotating (solid state) medium */ +#define DA_IS_NONROT 0 +/* Queue Algorithm Modifier default for restricted reordering in control mode page */ +#define DA_EMULATE_REST_REORD 0 #define SE_MODE_PAGE_BUF 512 @@ -111,9 +115,8 @@ struct se_subsystem_api; extern struct kmem_cache *se_mem_cache; -extern int init_se_global(void); -extern void release_se_global(void); -extern void init_scsi_index_table(void); +extern int init_se_kmem_caches(void); +extern void release_se_kmem_caches(void); extern u32 scsi_get_new_index(scsi_index_t); extern void transport_init_queue_obj(struct se_queue_obj *); extern int transport_subsystem_check_init(void); @@ -160,36 +163,38 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *, struct se_subsystem_dev *, u32, void *, struct se_dev_limits *, const char *, const char *); -extern void transport_device_setup_cmd(struct se_cmd *); extern void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, struct se_session *, u32, int, int, unsigned char *); +void *transport_kmap_first_data_page(struct se_cmd *cmd); +void transport_kunmap_first_data_page(struct se_cmd *cmd); extern void transport_free_se_cmd(struct se_cmd *); extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); extern int transport_generic_handle_cdb(struct se_cmd *); +extern int transport_handle_cdb_direct(struct se_cmd *); extern int transport_generic_handle_cdb_map(struct se_cmd *); extern int transport_generic_handle_data(struct se_cmd *); extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); extern void transport_generic_free_cmd_intr(struct se_cmd *); extern void __transport_stop_task_timer(struct se_task *, unsigned long *); -extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); extern int transport_clear_lun_from_sessions(struct se_lun *); extern int transport_check_aborted_status(struct se_cmd *, int); extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); extern void transport_send_task_abort(struct se_cmd *); -extern void transport_release_cmd_to_pool(struct se_cmd *); -extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); +extern void transport_release_cmd(struct se_cmd *); +extern void transport_generic_free_cmd(struct se_cmd *, int, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); -extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); +extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, - void *, struct se_mem *, + struct scatterlist *, 
struct se_mem *, struct se_mem **, u32 *, u32 *); extern void transport_do_task_sg_chain(struct se_cmd *); extern void transport_generic_process_write(struct se_cmd *); +extern int transport_generic_new_cmd(struct se_cmd *); extern int transport_generic_do_tmr(struct se_cmd *); /* From target_core_alua.c */ extern int core_alua_check_nonop_delay(struct se_cmd *); @@ -235,13 +240,13 @@ struct se_subsystem_api { */ int (*cdb_none)(struct se_task *); /* - * For SCF_SCSI_CONTROL_NONSG_IO_CDB + * For SCF_SCSI_DATA_SG_IO_CDB */ - int (*map_task_non_SG)(struct se_task *); + int (*map_data_SG)(struct se_task *); /* - * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB + * For SCF_SCSI_CONTROL_SG_IO_CDB */ - int (*map_task_SG)(struct se_task *); + int (*map_control_SG)(struct se_task *); /* * attach_hba(): */ @@ -292,7 +297,7 @@ struct se_subsystem_api { * drivers. Provided out of convenience. */ int (*transport_complete)(struct se_task *task); - struct se_task *(*alloc_task)(struct se_cmd *); + struct se_task *(*alloc_task)(unsigned char *cdb); /* * do_task(): */ @@ -342,19 +347,9 @@ struct se_subsystem_api { */ sector_t (*get_blocks)(struct se_device *); /* - * do_se_mem_map(): - */ - int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, - struct se_mem *, struct se_mem **, u32 *, u32 *); - /* * get_sense_buffer(): */ unsigned char *(*get_sense_buffer)(struct se_task *); } ____cacheline_aligned; -#define TRANSPORT(dev) ((dev)->transport) -#define HBA_TRANSPORT(hba) ((hba)->transport) - -extern struct se_global *se_global; - #endif /* TARGET_CORE_TRANSPORT_H */ diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h index ae973d2e27a..603f5a0f036 100644 --- a/include/trace/events/asoc.h +++ b/include/trace/events/asoc.h @@ -9,6 +9,7 @@ struct snd_soc_jack; struct snd_soc_codec; +struct snd_soc_platform; struct snd_soc_card; struct snd_soc_dapm_widget; @@ -59,6 +60,50 @@ DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read, ); +DECLARE_EVENT_CLASS(snd_soc_preg, + + TP_PROTO(struct snd_soc_platform *platform, unsigned int reg, + unsigned int val), + + TP_ARGS(platform, reg, val), + + TP_STRUCT__entry( + __string( name, platform->name ) + __field( int, id ) + __field( unsigned int, reg ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __assign_str(name, platform->name); + __entry->id = platform->id; + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("platform=%s.%d reg=%x val=%x", __get_str(name), + (int)__entry->id, (unsigned int)__entry->reg, + (unsigned int)__entry->val) +); + +DEFINE_EVENT(snd_soc_preg, snd_soc_preg_write, + + TP_PROTO(struct snd_soc_platform *platform, unsigned int reg, + unsigned int val), + + TP_ARGS(platform, reg, val) + +); + +DEFINE_EVENT(snd_soc_preg, snd_soc_preg_read, + + TP_PROTO(struct snd_soc_platform *platform, unsigned int reg, + unsigned int val), + + TP_ARGS(platform, reg, val) + +); + DECLARE_EVENT_CLASS(snd_soc_card, TP_PROTO(struct snd_soc_card *card, int val), diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h new file mode 100644 index 00000000000..779abb91df8 --- /dev/null +++ b/include/trace/events/sock.h @@ -0,0 +1,68 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sock + +#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SOCK_H + +#include <net/sock.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(sock_rcvqueue_full, + + TP_PROTO(struct sock *sk, struct sk_buff *skb), + + TP_ARGS(sk, skb), + + TP_STRUCT__entry( + __field(int, rmem_alloc) + __field(unsigned 
int, truesize) + __field(int, sk_rcvbuf) + ), + + TP_fast_assign( + __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); + __entry->truesize = skb->truesize; + __entry->sk_rcvbuf = sk->sk_rcvbuf; + ), + + TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d", + __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf) +); + +TRACE_EVENT(sock_exceed_buf_limit, + + TP_PROTO(struct sock *sk, struct proto *prot, long allocated), + + TP_ARGS(sk, prot, allocated), + + TP_STRUCT__entry( + __array(char, name, 32) + __field(long *, sysctl_mem) + __field(long, allocated) + __field(int, sysctl_rmem) + __field(int, rmem_alloc) + ), + + TP_fast_assign( + strncpy(__entry->name, prot->name, 32); + __entry->sysctl_mem = prot->sysctl_mem; + __entry->allocated = allocated; + __entry->sysctl_rmem = prot->sysctl_rmem[0]; + __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); + ), + + TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " + "sysctl_rmem=%d rmem_alloc=%d", + __entry->name, + __entry->sysctl_mem[0], + __entry->sysctl_mem[1], + __entry->sysctl_mem[2], + __entry->allocated, + __entry->sysctl_rmem, + __entry->rmem_alloc) +); + +#endif /* _TRACE_SOCK_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/udp.h b/include/trace/events/udp.h new file mode 100644 index 00000000000..a664bb94097 --- /dev/null +++ b/include/trace/events/udp.h @@ -0,0 +1,32 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM udp + +#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_UDP_H + +#include <linux/udp.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(udp_fail_queue_rcv_skb, + + TP_PROTO(int rc, struct sock *sk), + + TP_ARGS(rc, sk), + + TP_STRUCT__entry( + __field(int, rc) + __field(__u16, lport) + ), + + TP_fast_assign( + __entry->rc = rc; + __entry->lport = inet_sk(sk)->inet_num; + ), + + TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport) +); + +#endif /* _TRACE_UDP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index b2c33bd955f..36851f7f13d 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -179,6 +179,83 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re TP_ARGS(nr_reclaimed) ); +TRACE_EVENT(mm_shrink_slab_start, + TP_PROTO(struct shrinker *shr, struct shrink_control *sc, + long nr_objects_to_shrink, unsigned long pgs_scanned, + unsigned long lru_pgs, unsigned long cache_items, + unsigned long long delta, unsigned long total_scan), + + TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs, + cache_items, delta, total_scan), + + TP_STRUCT__entry( + __field(struct shrinker *, shr) + __field(void *, shrink) + __field(long, nr_objects_to_shrink) + __field(gfp_t, gfp_flags) + __field(unsigned long, pgs_scanned) + __field(unsigned long, lru_pgs) + __field(unsigned long, cache_items) + __field(unsigned long long, delta) + __field(unsigned long, total_scan) + ), + + TP_fast_assign( + __entry->shr = shr; + __entry->shrink = shr->shrink; + __entry->nr_objects_to_shrink = nr_objects_to_shrink; + __entry->gfp_flags = sc->gfp_mask; + __entry->pgs_scanned = pgs_scanned; + __entry->lru_pgs = lru_pgs; + __entry->cache_items = cache_items; + __entry->delta = delta; + __entry->total_scan = total_scan; + ), + + TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld", + __entry->shrink, + 
__entry->shr, + __entry->nr_objects_to_shrink, + show_gfp_flags(__entry->gfp_flags), + __entry->pgs_scanned, + __entry->lru_pgs, + __entry->cache_items, + __entry->delta, + __entry->total_scan) +); + +TRACE_EVENT(mm_shrink_slab_end, + TP_PROTO(struct shrinker *shr, int shrinker_retval, + long unused_scan_cnt, long new_scan_cnt), + + TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt), + + TP_STRUCT__entry( + __field(struct shrinker *, shr) + __field(void *, shrink) + __field(long, unused_scan) + __field(long, new_scan) + __field(int, retval) + __field(long, total_scan) + ), + + TP_fast_assign( + __entry->shr = shr; + __entry->shrink = shr->shrink; + __entry->unused_scan = unused_scan_cnt; + __entry->new_scan = new_scan_cnt; + __entry->retval = shrinker_retval; + __entry->total_scan = new_scan_cnt - unused_scan_cnt; + ), + + TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d", + __entry->shrink, + __entry->shr, + __entry->unused_scan, + __entry->new_scan, + __entry->total_scan, + __entry->retval) +); DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template, diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h new file mode 100644 index 00000000000..44d8decee09 --- /dev/null +++ b/include/trace/events/xen.h @@ -0,0 +1,504 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xen + +#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XEN_H + +#include <linux/tracepoint.h> +#include <asm/paravirt_types.h> +#include <asm/xen/trace_types.h> + +/* Multicalls */ +DECLARE_EVENT_CLASS(xen_mc__batch, + TP_PROTO(enum paravirt_lazy_mode mode), + TP_ARGS(mode), + TP_STRUCT__entry( + __field(enum paravirt_lazy_mode, mode) + ), + TP_fast_assign(__entry->mode = mode), + TP_printk("start batch LAZY_%s", + (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" : + (__entry->mode == PARAVIRT_LAZY_CPU) ? 
"CPU" : "NONE") + ); +#define DEFINE_XEN_MC_BATCH(name) \ + DEFINE_EVENT(xen_mc__batch, name, \ + TP_PROTO(enum paravirt_lazy_mode mode), \ + TP_ARGS(mode)) + +DEFINE_XEN_MC_BATCH(xen_mc_batch); +DEFINE_XEN_MC_BATCH(xen_mc_issue); + +TRACE_EVENT(xen_mc_entry, + TP_PROTO(struct multicall_entry *mc, unsigned nargs), + TP_ARGS(mc, nargs), + TP_STRUCT__entry( + __field(unsigned int, op) + __field(unsigned int, nargs) + __array(unsigned long, args, 6) + ), + TP_fast_assign(__entry->op = mc->op; + __entry->nargs = nargs; + memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs); + memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs)); + ), + TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]", + __entry->op, xen_hypercall_name(__entry->op), + __entry->args[0], __entry->args[1], __entry->args[2], + __entry->args[3], __entry->args[4], __entry->args[5]) + ); + +TRACE_EVENT(xen_mc_entry_alloc, + TP_PROTO(size_t args), + TP_ARGS(args), + TP_STRUCT__entry( + __field(size_t, args) + ), + TP_fast_assign(__entry->args = args), + TP_printk("alloc entry %zu arg bytes", __entry->args) + ); + +TRACE_EVENT(xen_mc_callback, + TP_PROTO(xen_mc_callback_fn_t fn, void *data), + TP_ARGS(fn, data), + TP_STRUCT__entry( + __field(xen_mc_callback_fn_t, fn) + __field(void *, data) + ), + TP_fast_assign( + __entry->fn = fn; + __entry->data = data; + ), + TP_printk("callback %pf, data %p", + __entry->fn, __entry->data) + ); + +TRACE_EVENT(xen_mc_flush_reason, + TP_PROTO(enum xen_mc_flush_reason reason), + TP_ARGS(reason), + TP_STRUCT__entry( + __field(enum xen_mc_flush_reason, reason) + ), + TP_fast_assign(__entry->reason = reason), + TP_printk("flush reason %s", + (__entry->reason == XEN_MC_FL_NONE) ? "NONE" : + (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" : + (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" : + (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??") + ); + +TRACE_EVENT(xen_mc_flush, + TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx), + TP_ARGS(mcidx, argidx, cbidx), + TP_STRUCT__entry( + __field(unsigned, mcidx) + __field(unsigned, argidx) + __field(unsigned, cbidx) + ), + TP_fast_assign(__entry->mcidx = mcidx; + __entry->argidx = argidx; + __entry->cbidx = cbidx), + TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks", + __entry->mcidx, __entry->argidx, __entry->cbidx) + ); + +TRACE_EVENT(xen_mc_extend_args, + TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res), + TP_ARGS(op, args, res), + TP_STRUCT__entry( + __field(unsigned int, op) + __field(size_t, args) + __field(enum xen_mc_extend_args, res) + ), + TP_fast_assign(__entry->op = op; + __entry->args = args; + __entry->res = res), + TP_printk("extending op %u%s by %zu bytes res %s", + __entry->op, xen_hypercall_name(__entry->op), + __entry->args, + __entry->res == XEN_MC_XE_OK ? "OK" : + __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" : + __entry->res == XEN_MC_XE_NO_SPACE ? 
"NO_SPACE" : "???") + ); + +/* mmu */ +DECLARE_EVENT_CLASS(xen_mmu__set_pte, + TP_PROTO(pte_t *ptep, pte_t pteval), + TP_ARGS(ptep, pteval), + TP_STRUCT__entry( + __field(pte_t *, ptep) + __field(pteval_t, pteval) + ), + TP_fast_assign(__entry->ptep = ptep; + __entry->pteval = pteval.pte), + TP_printk("ptep %p pteval %0*llx (raw %0*llx)", + __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) + ); + +#define DEFINE_XEN_MMU_SET_PTE(name) \ + DEFINE_EVENT(xen_mmu__set_pte, name, \ + TP_PROTO(pte_t *ptep, pte_t pteval), \ + TP_ARGS(ptep, pteval)) + +DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte); +DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic); + +TRACE_EVENT(xen_mmu_set_domain_pte, + TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid), + TP_ARGS(ptep, pteval, domid), + TP_STRUCT__entry( + __field(pte_t *, ptep) + __field(pteval_t, pteval) + __field(unsigned, domid) + ), + TP_fast_assign(__entry->ptep = ptep; + __entry->pteval = pteval.pte; + __entry->domid = domid), + TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u", + __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval, + __entry->domid) + ); + +TRACE_EVENT(xen_mmu_set_pte_at, + TP_PROTO(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval), + TP_ARGS(mm, addr, ptep, pteval), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(pte_t *, ptep) + __field(pteval_t, pteval) + ), + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->ptep = ptep; + __entry->pteval = pteval.pte), + TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", + __entry->mm, __entry->addr, __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) + ); + +TRACE_EVENT(xen_mmu_pte_clear, + TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep), + TP_ARGS(mm, addr, ptep), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(pte_t *, ptep) + ), + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->ptep = ptep), + TP_printk("mm %p addr %lx ptep %p", + __entry->mm, __entry->addr, __entry->ptep) + ); + +TRACE_EVENT(xen_mmu_set_pmd, + TP_PROTO(pmd_t *pmdp, pmd_t pmdval), + TP_ARGS(pmdp, pmdval), + TP_STRUCT__entry( + __field(pmd_t *, pmdp) + __field(pmdval_t, pmdval) + ), + TP_fast_assign(__entry->pmdp = pmdp; + __entry->pmdval = pmdval.pmd), + TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)", + __entry->pmdp, + (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)), + (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval) + ); + +TRACE_EVENT(xen_mmu_pmd_clear, + TP_PROTO(pmd_t *pmdp), + TP_ARGS(pmdp), + TP_STRUCT__entry( + __field(pmd_t *, pmdp) + ), + TP_fast_assign(__entry->pmdp = pmdp), + TP_printk("pmdp %p", __entry->pmdp) + ); + +#if PAGETABLE_LEVELS >= 4 + +TRACE_EVENT(xen_mmu_set_pud, + TP_PROTO(pud_t *pudp, pud_t pudval), + TP_ARGS(pudp, pudval), + TP_STRUCT__entry( + __field(pud_t *, pudp) + __field(pudval_t, pudval) + ), + TP_fast_assign(__entry->pudp = pudp; + __entry->pudval = native_pud_val(pudval)), + TP_printk("pudp %p pudval %0*llx (raw %0*llx)", + __entry->pudp, + (int)sizeof(pudval_t) * 2, (unsigned long 
long)pud_val(native_make_pud(__entry->pudval)), + (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval) + ); + +TRACE_EVENT(xen_mmu_set_pgd, + TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval), + TP_ARGS(pgdp, user_pgdp, pgdval), + TP_STRUCT__entry( + __field(pgd_t *, pgdp) + __field(pgd_t *, user_pgdp) + __field(pgdval_t, pgdval) + ), + TP_fast_assign(__entry->pgdp = pgdp; + __entry->user_pgdp = user_pgdp; + __entry->pgdval = pgdval.pgd), + TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)", + __entry->pgdp, __entry->user_pgdp, + (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)), + (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval) + ); + +TRACE_EVENT(xen_mmu_pud_clear, + TP_PROTO(pud_t *pudp), + TP_ARGS(pudp), + TP_STRUCT__entry( + __field(pud_t *, pudp) + ), + TP_fast_assign(__entry->pudp = pudp), + TP_printk("pudp %p", __entry->pudp) + ); +#else + +TRACE_EVENT(xen_mmu_set_pud, + TP_PROTO(pud_t *pudp, pud_t pudval), + TP_ARGS(pudp, pudval), + TP_STRUCT__entry( + __field(pud_t *, pudp) + __field(pudval_t, pudval) + ), + TP_fast_assign(__entry->pudp = pudp; + __entry->pudval = native_pud_val(pudval)), + TP_printk("pudp %p pudval %0*llx (raw %0*llx)", + __entry->pudp, + (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)), + (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval) + ); + +#endif + +TRACE_EVENT(xen_mmu_pgd_clear, + TP_PROTO(pgd_t *pgdp), + TP_ARGS(pgdp), + TP_STRUCT__entry( + __field(pgd_t *, pgdp) + ), + TP_fast_assign(__entry->pgdp = pgdp), + TP_printk("pgdp %p", __entry->pgdp) + ); + +DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot, + TP_PROTO(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval), + TP_ARGS(mm, addr, ptep, pteval), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + __field(pte_t *, ptep) + __field(pteval_t, pteval) + ), + TP_fast_assign(__entry->mm = mm; + __entry->addr = addr; + __entry->ptep = ptep; + __entry->pteval = pteval.pte), + TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)", + __entry->mm, __entry->addr, __entry->ptep, + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)), + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval) + ); +#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name) \ + DEFINE_EVENT(xen_mmu_ptep_modify_prot, name, \ + TP_PROTO(struct mm_struct *mm, unsigned long addr, \ + pte_t *ptep, pte_t pteval), \ + TP_ARGS(mm, addr, ptep, pteval)) + +DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start); +DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit); + +TRACE_EVENT(xen_mmu_alloc_ptpage, + TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned), + TP_ARGS(mm, pfn, level, pinned), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(unsigned long, pfn) + __field(unsigned, level) + __field(bool, pinned) + ), + TP_fast_assign(__entry->mm = mm; + __entry->pfn = pfn; + __entry->level = level; + __entry->pinned = pinned), + TP_printk("mm %p pfn %lx level %d %spinned", + __entry->mm, __entry->pfn, __entry->level, + __entry->pinned ? 
"" : "un") + ); + +TRACE_EVENT(xen_mmu_release_ptpage, + TP_PROTO(unsigned long pfn, unsigned level, bool pinned), + TP_ARGS(pfn, level, pinned), + TP_STRUCT__entry( + __field(unsigned long, pfn) + __field(unsigned, level) + __field(bool, pinned) + ), + TP_fast_assign(__entry->pfn = pfn; + __entry->level = level; + __entry->pinned = pinned), + TP_printk("pfn %lx level %d %spinned", + __entry->pfn, __entry->level, + __entry->pinned ? "" : "un") + ); + +DECLARE_EVENT_CLASS(xen_mmu_pgd, + TP_PROTO(struct mm_struct *mm, pgd_t *pgd), + TP_ARGS(mm, pgd), + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(pgd_t *, pgd) + ), + TP_fast_assign(__entry->mm = mm; + __entry->pgd = pgd), + TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd) + ); +#define DEFINE_XEN_MMU_PGD_EVENT(name) \ + DEFINE_EVENT(xen_mmu_pgd, name, \ + TP_PROTO(struct mm_struct *mm, pgd_t *pgd), \ + TP_ARGS(mm, pgd)) + +DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin); +DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin); + +TRACE_EVENT(xen_mmu_flush_tlb, + TP_PROTO(int x), + TP_ARGS(x), + TP_STRUCT__entry(__array(char, x, 0)), + TP_fast_assign((void)x), + TP_printk("%s", "") + ); + +TRACE_EVENT(xen_mmu_flush_tlb_single, + TP_PROTO(unsigned long addr), + TP_ARGS(addr), + TP_STRUCT__entry( + __field(unsigned long, addr) + ), + TP_fast_assign(__entry->addr = addr), + TP_printk("addr %lx", __entry->addr) + ); + +TRACE_EVENT(xen_mmu_flush_tlb_others, + TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm, + unsigned long addr), + TP_ARGS(cpus, mm, addr), + TP_STRUCT__entry( + __field(unsigned, ncpus) + __field(struct mm_struct *, mm) + __field(unsigned long, addr) + ), + TP_fast_assign(__entry->ncpus = cpumask_weight(cpus); + __entry->mm = mm; + __entry->addr = addr), + TP_printk("ncpus %d mm %p addr %lx", + __entry->ncpus, __entry->mm, __entry->addr) + ); + +TRACE_EVENT(xen_mmu_write_cr3, + TP_PROTO(bool kernel, unsigned long cr3), + TP_ARGS(kernel, cr3), + TP_STRUCT__entry( + __field(bool, kernel) + __field(unsigned long, cr3) + ), + TP_fast_assign(__entry->kernel = kernel; + __entry->cr3 = cr3), + TP_printk("%s cr3 %lx", + __entry->kernel ? 
"kernel" : "user", __entry->cr3) + ); + + +/* CPU */ +TRACE_EVENT(xen_cpu_write_ldt_entry, + TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc), + TP_ARGS(dt, entrynum, desc), + TP_STRUCT__entry( + __field(struct desc_struct *, dt) + __field(int, entrynum) + __field(u64, desc) + ), + TP_fast_assign(__entry->dt = dt; + __entry->entrynum = entrynum; + __entry->desc = desc; + ), + TP_printk("dt %p entrynum %d entry %016llx", + __entry->dt, __entry->entrynum, + (unsigned long long)__entry->desc) + ); + +TRACE_EVENT(xen_cpu_write_idt_entry, + TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent), + TP_ARGS(dt, entrynum, ent), + TP_STRUCT__entry( + __field(gate_desc *, dt) + __field(int, entrynum) + ), + TP_fast_assign(__entry->dt = dt; + __entry->entrynum = entrynum; + ), + TP_printk("dt %p entrynum %d", + __entry->dt, __entry->entrynum) + ); + +TRACE_EVENT(xen_cpu_load_idt, + TP_PROTO(const struct desc_ptr *desc), + TP_ARGS(desc), + TP_STRUCT__entry( + __field(unsigned long, addr) + ), + TP_fast_assign(__entry->addr = desc->address), + TP_printk("addr %lx", __entry->addr) + ); + +TRACE_EVENT(xen_cpu_write_gdt_entry, + TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type), + TP_ARGS(dt, entrynum, desc, type), + TP_STRUCT__entry( + __field(u64, desc) + __field(struct desc_struct *, dt) + __field(int, entrynum) + __field(int, type) + ), + TP_fast_assign(__entry->dt = dt; + __entry->entrynum = entrynum; + __entry->desc = *(u64 *)desc; + __entry->type = type; + ), + TP_printk("dt %p entrynum %d type %d desc %016llx", + __entry->dt, __entry->entrynum, __entry->type, + (unsigned long long)__entry->desc) + ); + +TRACE_EVENT(xen_cpu_set_ldt, + TP_PROTO(const void *addr, unsigned entries), + TP_ARGS(addr, entries), + TP_STRUCT__entry( + __field(const void *, addr) + __field(unsigned, entries) + ), + TP_fast_assign(__entry->addr = addr; + __entry->entries = entries), + TP_printk("addr %p entries %u", + __entry->addr, __entry->entries) + ); + + +#endif /* _TRACE_XEN_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/xen/balloon.h b/include/xen/balloon.h index a2b22f01a51..4076ed72afb 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -23,3 +23,13 @@ void balloon_set_new_target(unsigned long target); int alloc_xenballooned_pages(int nr_pages, struct page** pages); void free_xenballooned_pages(int nr_pages, struct page** pages); + +struct sys_device; +#ifdef CONFIG_XEN_SELFBALLOONING +extern int register_xen_selfballooning(struct sys_device *sysdev); +#else +static inline int register_xen_selfballooning(struct sys_device *sysdev) +{ + return -ENOSYS; +} +#endif diff --git a/include/xen/events.h b/include/xen/events.h index 9af21e19545..d287997d3ea 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -74,8 +74,6 @@ int xen_set_callback_via(uint64_t via); void xen_evtchn_do_upcall(struct pt_regs *regs); void xen_hvm_evtchn_do_upcall(void); -/* Allocate a pirq for a physical interrupt, given a gsi. */ -int xen_allocate_pirq_gsi(unsigned gsi); /* Bind a pirq for a physical interrupt to an irq. 
*/ int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name); diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h index c3adde32669..901724dc528 100644 --- a/include/xen/hvc-console.h +++ b/include/xen/hvc-console.h @@ -6,11 +6,13 @@ extern struct console xenboot_console; #ifdef CONFIG_HVC_XEN void xen_console_resume(void); void xen_raw_console_write(const char *str); +__attribute__((format(printf, 1, 2))) void xen_raw_printk(const char *fmt, ...); #else static inline void xen_console_resume(void) { } static inline void xen_raw_console_write(const char *str) { } -static inline void xen_raw_printk(const char *fmt, ...) { } +static inline __attribute__((format(printf, 1, 2))) +void xen_raw_printk(const char *fmt, ...) { } #endif #endif /* XEN_HVC_CONSOLE_H */ diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 70213b4515e..6acd9cefd51 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -450,6 +450,45 @@ struct start_info { int8_t cmd_line[MAX_GUEST_CMDLINE]; }; +struct dom0_vga_console_info { + uint8_t video_type; +#define XEN_VGATYPE_TEXT_MODE_3 0x03 +#define XEN_VGATYPE_VESA_LFB 0x23 + + union { + struct { + /* Font height, in pixels. */ + uint16_t font_height; + /* Cursor location (column, row). */ + uint16_t cursor_x, cursor_y; + /* Number of rows and columns (dimensions in characters). */ + uint16_t rows, columns; + } text_mode_3; + + struct { + /* Width and height, in pixels. */ + uint16_t width, height; + /* Bytes per scan line. */ + uint16_t bytes_per_line; + /* Bits per pixel. */ + uint16_t bits_per_pixel; + /* LFB physical address, and size (in units of 64kB). */ + uint32_t lfb_base; + uint32_t lfb_size; + /* RGB mask offsets and sizes, as defined by VBE 1.2+ */ + uint8_t red_pos, red_size; + uint8_t green_pos, green_size; + uint8_t blue_pos, blue_size; + uint8_t rsvd_pos, rsvd_size; + + /* VESA capabilities (offset 0xa, VESA command 0x4f00). */ + uint32_t gbl_caps; + /* Mode attributes (offset 0x0, VESA command 0x4f01). */ + uint16_t mode_attrs; + } vesa_lfb; + } u; +}; + /* These flags are passed in the 'flags' field of start_info_t. */ #define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */ #define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */ diff --git a/include/xen/tmem.h b/include/xen/tmem.h new file mode 100644 index 00000000000..82e2c83a32f --- /dev/null +++ b/include/xen/tmem.h @@ -0,0 +1,5 @@ +#ifndef _XEN_TMEM_H +#define _XEN_TMEM_H +/* defined in drivers/xen/tmem.c */ +extern int tmem_enabled; +#endif /* _XEN_TMEM_H */ diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 5467369e088..aceeca799fd 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -223,7 +223,9 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port); enum xenbus_state xenbus_read_driver_state(const char *path); +__attribute__((format(printf, 3, 4))) void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...); +__attribute__((format(printf, 3, 4))) void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...); const char *xenbus_strstate(enum xenbus_state state);
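
The trace headers added by this patch set (sock, udp, vmscan shrink_slab, xen) all follow the standard TRACE_EVENT() usage pattern: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, which instantiates the tracepoints, and every other caller simply includes the header and invokes the generated trace_<name>() helper with the TP_PROTO arguments. Below is a minimal sketch of that pattern for the new udp_fail_queue_rcv_skb event; the wrapper function, its name and its placement are hypothetical illustrations, not part of this patch (in the tree the event is instantiated and fired elsewhere in the networking code).

/*
 * Illustrative sketch only -- not part of this patch.  Shows the generic
 * tracepoint usage pattern for the udp_fail_queue_rcv_skb event declared
 * above; the helper below is hypothetical.
 */
#include <net/sock.h>

#define CREATE_TRACE_POINTS	/* emit the tracepoint definitions exactly once */
#include <trace/events/udp.h>

/* Queue an skb on a (UDP) socket and report failures via the new event. */
static int example_udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = sock_queue_rcv_skb(sk, skb);

	if (rc < 0)
		trace_udp_fail_queue_rcv_skb(rc, sk);	/* records rc and the local port */

	return rc;
}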