Diffstat (limited to 'arch/arm/include')
79 files changed, 907 insertions, 704 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 6550db3aa5c..960abceb8e1 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -1,3 +1,20 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += hwcap.h
+
+generic-y += auxvec.h
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += ioctl.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += sizes.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 65c3f2474f5..29035e86a59 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -293,4 +293,13 @@
 	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
 	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
 	.endm
+
+/* Utility macro for declaring string literals */
+	.macro	string name:req, string
+	.type \name , #object
+\name:
+	.asciz "\string"
+	.size \name , . - \name
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 7e79503ab89..86976d03438 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -208,16 +208,15 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 
 	c = atomic_read(v);
 	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
 		c = old;
-	return c != u;
+	return c;
 }
-#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
@@ -460,9 +459,6 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
 
-#else /* !CONFIG_GENERIC_ATOMIC64 */
-#include <asm-generic/atomic64.h>
-#endif
-#include <asm-generic/atomic-long.h>
+#endif /* !CONFIG_GENERIC_ATOMIC64 */
 #endif
 #endif
diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h
deleted file mode 100644
index c0536f6b29a..00000000000
--- a/arch/arm/include/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMARM_AUXVEC_H
-#define __ASMARM_AUXVEC_H
-
-#endif
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b4892a06442..f7419ef9c8f 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -26,8 +26,8 @@
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-#define smp_mb__before_clear_bit()	mb()
-#define smp_mb__after_clear_bit()	mb()
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
 
 /*
  * These functions are the basis of our bit ops.
@@ -310,10 +310,7 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
 
 /*
  * Ext2 is defined to use little-endian byte ordering.
  */
-#define ext2_set_bit_atomic(lock, nr, p)	\
-		test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p)	\
-		test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* __KERNEL__ */
diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b..00000000000
--- a/arch/arm/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 4d88425a416..9abe7a07d5a 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -3,21 +3,58 @@
 
 #ifdef CONFIG_BUG
 
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-extern void __bug(const char *file, int line) __attribute__((noreturn));
-
-/* give file/line information */
-#define BUG()		__bug(__FILE__, __LINE__)
+/*
+ * Use a suitable undefined instruction to use for ARM/Thumb2 bug handling.
+ * We need to be careful not to conflict with those used by other modules and
+ * the register_undef_hook() system.
+ */
+#ifdef CONFIG_THUMB2_KERNEL
+#define BUG_INSTR_VALUE 0xde02
+#define BUG_INSTR_TYPE ".hword "
 #else
+#define BUG_INSTR_VALUE 0xe7f001f2
+#define BUG_INSTR_TYPE ".word "
+#endif
 
-/* this just causes an oops */
-#define BUG()		do { *(int *)0 = 0; } while (1)
-#endif
+#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE)
+#define _BUG(file, line, value) __BUG(file, line, value)
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+/*
+ * The extra indirection is to ensure that the __FILE__ string comes through
+ * OK. Many version of gcc do not support the asm %c parameter which would be
+ * preferable to this unpleasantness. We use mergeable string sections to
+ * avoid multiple copies of the string appearing in the kernel image.
+ */
+
+#define __BUG(__file, __line, __value)				\
+do {								\
+	BUILD_BUG_ON(sizeof(struct bug_entry) != 12);		\
+	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\
+		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
+		"2:\t.asciz " #__file "\n"			\
+		".popsection\n"					\
+		".pushsection __bug_table,\"a\"\n"		\
+		"3:\t.word 1b, 2b\n"				\
+		"\t.hword " #__line ", 0\n"			\
+		".popsection");					\
+	unreachable();						\
+} while (0)
+
+#else /* not CONFIG_DEBUG_BUGVERBOSE */
+
+#define __BUG(__file, __line, __value)				\
+do {								\
+	asm volatile(BUG_INSTR_TYPE #__value);			\
+	unreachable();						\
+} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
 
 #define HAVE_ARCH_BUG
-#endif
+#endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>
diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h
index c023db09fcc..7ea78144ae2 100644
--- a/arch/arm/include/asm/cachetype.h
+++ b/arch/arm/include/asm/cachetype.h
@@ -7,6 +7,7 @@
 #define CACHEID_VIPT		(CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING)
 #define CACHEID_ASID_TAGGED	(1 << 3)
 #define CACHEID_VIPT_I_ALIASING	(1 << 4)
+#define CACHEID_PIPT		(1 << 5)
 
 extern unsigned int cacheid;
 
@@ -16,6 +17,7 @@ extern unsigned int cacheid;
 #define cache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_ALIASING)
 #define icache_is_vivt_asid_tagged()	cacheid_is(CACHEID_ASID_TAGGED)
 #define icache_is_vipt_aliasing()	cacheid_is(CACHEID_VIPT_I_ALIASING)
+#define icache_is_pipt()		cacheid_is(CACHEID_PIPT)
 
 /*
  * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture
@@ -26,7 +28,8 @@ extern unsigned int cacheid;
 #if __LINUX_ARM_ARCH__ >= 7
 #define __CACHEID_ARCH_MIN	(CACHEID_VIPT_NONALIASING |\
 				 CACHEID_ASID_TAGGED |\
-				 CACHEID_VIPT_I_ALIASING)
+				 CACHEID_VIPT_I_ALIASING |\
+				 CACHEID_PIPT)
 #elif __LINUX_ARM_ARCH__ >= 6
 #define	__CACHEID_ARCH_MIN	(~CACHEID_VIVT)
 #else
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 765d3322236..80751c15c30 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -14,7 +14,12 @@
 
 #include <linux/slab.h>
 
+#ifdef CONFIG_HAVE_MACH_CLKDEV
 #include <mach/clkdev.h>
+#else
+#define __clk_get(clk)	({ 1; })
+#define __clk_put(clk)	do { } while (0)
+#endif
 
 static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
 {
diff --git a/arch/arm/include/asm/cputime.h b/arch/arm/include/asm/cputime.h
deleted file mode 100644
index 3a8002a5fec..00000000000
--- a/arch/arm/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARM_CPUTIME_H
-#define __ARM_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ARM_CPUTIME_H */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cd4458f6417..cb47d28cbe1 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,6 +8,7 @@
 #define CPUID_CACHETYPE	1
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
+#define CPUID_MPIDR	5
 
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"
@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
 	return read_cpuid(CPUID_TCM);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+	return read_cpuid(CPUID_MPIDR);
+}
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA. For
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 9f390ce335c..7aa368003b0 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -10,9 +10,17 @@ struct dev_archdata {
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
+#ifdef CONFIG_IOMMU_API
+	void *iommu; /* private IOMMU data */
+#endif
 };
 
+struct omap_device;
+
 struct pdev_archdata {
+#ifdef CONFIG_ARCH_OMAP
+	struct omap_device *od;
+#endif
 };
 
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837363e..cb3b7c981c4 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -32,7 +32,7 @@ static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
-	return (void *)__bus_to_virt(addr);
+	return (void *)__bus_to_virt((unsigned long)addr);
 }
 
 static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
@@ -236,6 +205,13 @@ extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
 int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
 		void *, dma_addr_t, size_t);
 
+/*
+ * This can be called during boot to increase the size of the consistent
+ * DMA region above it's default value of 2MB. It must be called before the
+ * memory allocator is initialised, i.e. before any core_initcall.
+ */
+extern void __init init_consistent_dma_size(unsigned long size);
+
 #ifdef CONFIG_DMABOUNCE
 
 /*
@@ -256,14 +232,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
 * a device as requireing DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
@@ -277,31 +253,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +282,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -342,12 +289,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
@@ -373,14 +314,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
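The dmabounce hunks above drop the global dma_needs_bounce() hook and instead pass a per-device callback as the new fourth argument of dmabounce_register_dev(). A minimal sketch of how a platform might supply that callback is below; the function names, pool sizes and the 1MB-crossing rule are illustrative assumptions rather than code from this diff, but the return convention (0 = buffer is usable as-is, non-zero = bounce) matches the removed dma_needs_bounce() documentation.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/sizes.h>

/* Hypothetical policy: bounce any buffer that crosses a 1MB boundary. */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr & ~(SZ_1M - 1)) != ((addr + size - 1) & ~(SZ_1M - 1));
}

static int example_platform_dma_init(struct device *dev)
{
	/* small-buffer pool of 512 bytes, large-buffer pool of 4KB */
	return dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);
}
```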
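Similarly, the atomic.h hunk earlier in this diff replaces the boolean-returning atomic_add_unless() with __atomic_add_unless(), which returns the counter's previous value. The old helpers are then rebuilt once in the generic &lt;linux/atomic.h&gt; layer, roughly along the lines of this sketch (not code from this diff):

```c
/*
 * Sketch of the generic wrappers layered on top of the new primitive:
 * the old boolean result is recovered by comparing the returned old
 * value with 'u', and atomic_inc_not_zero() follows from that.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
```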