Diffstat (limited to 'include')
56 files changed, 525 insertions, 639 deletions
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index cc9c7e8cced..f3b7b1a59c5 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -572,7 +572,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static inline unsigned long
+static __always_inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 {
         switch (size) {
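
The switch to __always_inline matters because __cmpxchg relies on the compiler constant-folding the size switch: __cmpxchg_called_with_bad_pointer() is declared but never defined, so every call to it must be optimized away for valid sizes, leaving only invalid cmpxchg() uses to fail at link time. A minimal sketch of that idiom, assuming hypothetical names (only __cmpxchg_called_with_bad_pointer's role comes from the diff):

    /* bad_size() plays the role of __cmpxchg_called_with_bad_pointer():
     * declared, never defined, so any call the optimizer cannot prove
     * dead breaks the final link. */
    extern void bad_size(void);

    static inline __attribute__((always_inline)) unsigned long
    my_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
    {
            switch (size) {
            case 4:
                    return __sync_val_compare_and_swap(
                                    (volatile unsigned int *)ptr, old, new);
            case 8:
                    return __sync_val_compare_and_swap(
                                    (volatile unsigned long *)ptr, old, new);
            }
            bad_size();     /* reachable only for an invalid size */
            return old;
    }

If the function were merely inline and the compiler declined to inline it, the dead call could survive even for valid sizes, which is presumably what the stronger attribute guards against here.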
diff --git a/include/asm-arm/arch-omap/io.h b/include/asm-arm/arch-omap/io.h
index f5bcc9a1aed..b726acfcab1 100644
--- a/include/asm-arm/arch-omap/io.h
+++ b/include/asm-arm/arch-omap/io.h
@@ -116,7 +116,11 @@ typedef struct { volatile u32 offset[4096]; } __regbase32;
                          ->offset[((vaddr)&4095)>>2]
 #define __REG32(paddr)  __REGV32(io_p2v(paddr))
 
-extern void omap_map_common_io(void);
+extern void omap1_map_common_io(void);
+extern void omap1_init_common_hw(void);
+
+extern void omap2_map_common_io(void);
+extern void omap2_init_common_hw(void);
 
 #else
diff --git a/include/asm-arm/mutex.h b/include/asm-arm/mutex.h
index 6caa59f1f59..cb29d84e690 100644
--- a/include/asm-arm/mutex.h
+++ b/include/asm-arm/mutex.h
@@ -23,72 +23,71 @@
  * simply bail out immediately through the slow path where the lock will be
  * reattempted until it succeeds.
  */
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
-        int __ex_flag, __res; \
- \
-        typecheck(atomic_t *, count); \
-        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
- \
-        __asm__ ( \
-                "ldrex %0, [%2] \n" \
-                "sub   %0, %0, #1 \n" \
-                "strex %1, %0, [%2] \n" \
- \
-                : "=&r" (__res), "=&r" (__ex_flag) \
-                : "r" (&(count)->counter) \
-                : "cc","memory" ); \
- \
-        if (unlikely(__res || __ex_flag)) \
-                fail_fn(count); \
-} while (0)
-
-#define __mutex_fastpath_lock_retval(count, fail_fn) \
-({ \
-        int __ex_flag, __res; \
- \
-        typecheck(atomic_t *, count); \
-        typecheck_fn(fastcall int (*)(atomic_t *), fail_fn); \
- \
-        __asm__ ( \
-                "ldrex %0, [%2] \n" \
-                "sub   %0, %0, #1 \n" \
-                "strex %1, %0, [%2] \n" \
- \
-                : "=&r" (__res), "=&r" (__ex_flag) \
-                : "r" (&(count)->counter) \
-                : "cc","memory" ); \
- \
-        __res |= __ex_flag; \
-        if (unlikely(__res != 0)) \
-                __res = fail_fn(count); \
-        __res; \
-})
+static inline void
+__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+        int __ex_flag, __res;
+
+        __asm__ (
+
+                "ldrex %0, [%2] \n\t"
+                "sub   %0, %0, #1 \n\t"
+                "strex %1, %0, [%2] "
+
+                : "=&r" (__res), "=&r" (__ex_flag)
+                : "r" (&(count)->counter)
+                : "cc","memory" );
+
+        __res |= __ex_flag;
+        if (unlikely(__res != 0))
+                fail_fn(count);
+}
+
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+{
+        int __ex_flag, __res;
+
+        __asm__ (
+
+                "ldrex %0, [%2] \n\t"
+                "sub   %0, %0, #1 \n\t"
+                "strex %1, %0, [%2] "
+
+                : "=&r" (__res), "=&r" (__ex_flag)
+                : "r" (&(count)->counter)
+                : "cc","memory" );
+
+        __res |= __ex_flag;
+        if (unlikely(__res != 0))
+                __res = fail_fn(count);
+        return __res;
+}
 
 /*
  * Same trick is used for the unlock fast path. However the original value,
  * rather than the result, is used to test for success in order to have
  * better generated assembly.
  */
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
-        int __ex_flag, __res, __orig; \
- \
-        typecheck(atomic_t *, count); \
-        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
- \
-        __asm__ ( \
-                "ldrex %0, [%3] \n" \
-                "add   %1, %0, #1 \n" \
-                "strex %2, %1, [%3] \n" \
- \
-                : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) \
-                : "r" (&(count)->counter) \
-                : "cc","memory" ); \
- \
-        if (unlikely(__orig || __ex_flag)) \
-                fail_fn(count); \
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+{
+        int __ex_flag, __res, __orig;
+
+        __asm__ (
+
+                "ldrex %0, [%3] \n\t"
+                "add   %1, %0, #1 \n\t"
+                "strex %2, %1, [%3] "
+
+                : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+                : "r" (&(count)->counter)
+                : "cc","memory" );
+
+        __orig |= __ex_flag;
+        if (unlikely(__orig != 0))
+                fail_fn(count);
+}
 
 /*
  * If the unlock was done on a contended lock, or if the unlock simply fails
@@ -110,12 +109,12 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 
         __asm__ (
 
-                "1:     ldrex %0, [%3] \n"
-                "subs   %1, %0, #1 \n"
-                "strexeq %2, %1, [%3] \n"
-                "movlt  %0, #0 \n"
-                "cmpeq  %2, #0 \n"
-                "bgt    1b \n"
+                "1:     ldrex %0, [%3] \n\t"
+                "subs   %1, %0, #1 \n\t"
+                "strexeq %2, %1, [%3] \n\t"
+                "movlt  %0, #0 \n\t"
+                "cmpeq  %2, #0 \n\t"
+                "bgt    1b "
 
                 : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
                 : "r" (&count->counter)
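
The converted fastpaths keep the contract the old macros had: ldrex/strex atomically decrement the count, and any failure (a negative result, or a lost exclusive reservation) falls through to fail_fn, the slow path, which simply retries. A rough portable analogue of that fastpath/slowpath split using C11 atomics (fastpath_lock and the 1/0/negative counter protocol here are illustrative, not the kernel API):

    #include <stdatomic.h>

    /* Illustrative mutex counter: 1 = unlocked, 0 = locked,
     * negative = locked with waiters. */
    static inline void fastpath_lock(atomic_int *count,
                                     void (*fail_fn)(atomic_int *))
    {
            /* The atomic decrement stands in for the ldrex/sub/strex
             * sequence; only a clean 1 -> 0 transition takes the lock
             * on the fast path. */
            if (atomic_fetch_sub_explicit(count, 1, memory_order_acquire) != 1)
                    fail_fn(count); /* contended: retry, queue, sleep */
    }

The ARM version additionally branches to the slow path when strex fails spuriously, which is safe precisely because, as the comment above notes, the slow path reattempts the lock until it succeeds.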
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 597496ed2ae..cf6f2cd9c51 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -315,8 +315,9 @@
 #define __NR_faccessat          307
 #define __NR_pselect6           308
 #define __NR_ppoll              309
+#define __NR_unshare            310
 
-#define NR_syscalls 310
+#define NR_syscalls 311
 
 /*
  * user-visible error numbers are in the range -1 - -128:  see
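
The new __NR_unshare slot wires sys_unshare() up on i386 and bumps NR_syscalls to match. Before a libc wrapper existed, the call could be exercised through syscall(); a small usage sketch (CLONE_NEWNS is the standard flag value from <linux/sched.h>; the rest is illustrative):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_unshare
    #define __NR_unshare 310        /* i386 number added above */
    #endif

    #define CLONE_NEWNS 0x00020000  /* disassociate the mount namespace */

    int main(void)
    {
            /* Requires CAP_SYS_ADMIN; gives the caller a private copy
             * of its mount namespace without creating a new process. */
            if (syscall(__NR_unshare, CLONE_NEWNS) == -1) {
                    perror("unshare");
                    return 1;
            }
            puts("mount namespace unshared");
            return 0;
    }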
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 09b99029ac1..23c8e1be191 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -559,6 +559,23 @@ ia64_eoi (void)
 
 #define cpu_relax()     ia64_hint(ia64_hint_pause)
 
+static inline int
+ia64_get_irr(unsigned int vector)
+{
+        unsigned int reg = vector / 64;
+        unsigned int bit = vector % 64;
+        u64 irr;
+
+        switch (reg) {
+        case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
+        case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
+        case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
+        case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
+        }
+
+        return test_bit(bit, &irr);
+}
+
 static inline void
 ia64_set_lrr0 (unsigned long val)
 {
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 313cad0628d..0b210abbe00 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -658,15 +658,7 @@ ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
         return isrv.status;
 }
 
-/* Flush all the processor and platform level instruction and/or data caches */
-static inline s64
-ia64_sal_cache_flush (u64 cache_type)
-{
-        struct ia64_sal_retval isrv;
-        SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
-        return isrv.status;
-}
-
+extern s64 ia64_sal_cache_flush (u64 cache_type);
 
 /* Initialize all the processor and platform level instruction and data caches */
 static inline s64
diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h
index f50da3d91d0..01e5b410323 100644
--- a/include/asm-ia64/sn/bte.h
+++ b/include/asm-ia64/sn/bte.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
@@ -100,13 +100,28 @@
 #define BTE_LNSTAT_STORE(_bte, _x) \
                         HUB_S(_bte->bte_base_addr, (_x))
 #define BTE_SRC_STORE(_bte, _x) \
-                        HUB_S(_bte->bte_source_addr, (_x))
+({ \
+        u64 __addr = ((_x) & ~AS_MASK); \
+        if (is_shub2()) \
+                __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+        HUB_S(_bte->bte_source_addr, __addr); \
+})
 #define BTE_DEST_STORE(_bte, _x) \
-                        HUB_S(_bte->bte_destination_addr, (_x))
+({ \
+        u64 __addr = ((_x) & ~AS_MASK); \
+        if (is_shub2()) \
+                __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+        HUB_S(_bte->bte_destination_addr, __addr); \
+})
 #define BTE_CTRL_STORE(_bte, _x) \
                         HUB_S(_bte->bte_control_addr, (_x))
 #define BTE_NOTIF_STORE(_bte, _x) \
-                        HUB_S(_bte->bte_notify_addr, (_x))
+({ \
+        u64 __addr = ia64_tpa((_x) & ~AS_MASK); \
+        if (is_shub2()) \
+                __addr = SH2_TIO_PHYS_TO_DMA(__addr); \
+        HUB_S(_bte->bte_notify_addr, __addr); \
+})
 
 #define BTE_START_TRANSFER(_bte, _len, _mode) \
         is_shub2() ? BTE_CTRL_STORE(_bte, IBLS_BUSY | (_mode << 24) | _len) \
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
index a3431372c6e..60a51a406ee 100644
--- a/include/asm-ia64/sn/intr.h
+++ b/include/asm-ia64/sn/intr.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc.  All rights reserved.
  */
 
 #ifndef _ASM_IA64_SN_INTR_H
@@ -11,26 +11,26 @@
 
 #include <linux/rcupdate.h>
 
-#define SGI_UART_VECTOR                 (0xe9)
+#define SGI_UART_VECTOR                 0xe9
 
 /* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
-#define SGI_XPC_ACTIVATE                (0x30)
-#define SGI_II_ERROR                    (0x31)
-#define SGI_XBOW_ERROR                  (0x32)
-#define SGI_PCIASIC_ERROR               (0x33)
-#define SGI_ACPI_SCI_INT                (0x34)
-#define SGI_TIOCA_ERROR                 (0x35)
-#define SGI_TIO_ERROR                   (0x36)
-#define SGI_TIOCX_ERROR                 (0x37)
-#define SGI_MMTIMER_VECTOR              (0x38)
-#define SGI_XPC_NOTIFY                  (0xe7)
-
-#define IA64_SN2_FIRST_DEVICE_VECTOR    (0x3c)
-#define IA64_SN2_LAST_DEVICE_VECTOR     (0xe6)
-
-#define SN2_IRQ_RESERVED                (0x1)
-#define SN2_IRQ_CONNECTED               (0x2)
-#define SN2_IRQ_SHARED                  (0x4)
+#define SGI_XPC_ACTIVATE                0x30
+#define SGI_II_ERROR                    0x31
+#define SGI_XBOW_ERROR                  0x32
+#define SGI_PCIASIC_ERROR               0x33
+#define SGI_ACPI_SCI_INT                0x34
+#define SGI_TIOCA_ERROR                 0x35
+#define SGI_TIO_ERROR                   0x36
+#define SGI_TIOCX_ERROR                 0x37
+#define SGI_MMTIMER_VECTOR              0x38
+#define SGI_XPC_NOTIFY                  0xe7
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR    0x3c
+#define IA64_SN2_LAST_DEVICE_VECTOR     0xe6
+
+#define SN2_IRQ_RESERVED                0x1
+#define SN2_IRQ_CONNECTED               0x2
+#define SN2_IRQ_SHARED                  0x4
 
 // The SN PROM irq struct
 struct sn_irq_info {
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 80c5a234e25..06253871562 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -249,32 +249,7 @@ extern void ia64_load_extra (struct task_struct *task);
 # define switch_to(prev,next,last)      __switch_to(prev, next, last)
 #endif
 
-/*
- * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
- * because that could cause a deadlock.  Here is an example by Erich Focht:
- *
- * Example:
- * CPU#0:
- * schedule()
- *    -> spin_lock_irq(&rq->lock)
- *    -> context_switch()
- *       -> wrap_mmu_context()
- *          -> read_lock(&tasklist_lock)
- *
- * CPU#1:
- * sys_wait4() or release_task() or forget_original_parent()
- *    -> write_lock(&tasklist_lock)
- *    -> do_notify_parent()
- *       -> wake_up_parent()
- *          -> try_to_wake_up()
- *             -> spin_lock_irq(&parent_rq->lock)
- *
- * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock
- * of that CPU which will not be released, because there we wait for the
- * tasklist_lock to become available.
- */
 #define __ARCH_WANT_UNLOCKED_CTXSW
-
 #define ARCH_HAS_PREFETCH_SWITCH_STACK
 
 #define ia64_platform_is(x)     (strcmp(x, platform_name) == 0)
diff --git a/include/asm-m68knommu/hardirq.h b/include/asm-m68knommu/hardirq.h
index e8659e739a6..476180f4cba 100644
--- a/include/asm-m68knommu/hardirq.h
+++ b/include/asm-m68knommu/hardirq.h
@@ -4,6 +4,7 @@
 #include <linux/config.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
+#include <asm/irq.h>
 
 typedef struct {
         unsigned int __softirq_pending;
diff --git a/include/asm-mips/abi.h b/include/asm-mips/abi.h
index 2e7e651c3e3..1ce0518ace2 100644
--- a/include/asm-mips/abi.h
+++ b/include/asm-mips/abi.h
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2005 by Ralf Baechle
+ * Copyright (C) 2005, 06 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2005 MIPS Technologies, Inc.
  */
 #ifndef _ASM_ABI_H
@@ -13,7 +13,7 @@
 #include <asm/siginfo.h>
 
 struct mips_abi {
-        int (* const do_signal)(sigset_t *oldset, struct pt_regs *regs);
+        void (* const do_signal)(struct pt_regs *regs);
         int (* const setup_frame)(struct k_sigaction * ka,
                                   struct pt_regs *regs, int signr,
                                   sigset_t *set);
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 3b0c8aaf6e8..8e802059fe6 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -644,20 +644,26 @@ static inline unsigned long ffz(unsigned long word)
 }
 
 /*
- * flz - find last zero in word.
+ * fls - find last bit set.
  * @word: The word to search
  *
- * Returns 0..SZLONG-1
- * Undefined if no zero exists, so code should check against ~0UL first.
+ * Returns 1..SZLONG
+ * Returns 0 if no bit exists
  */
-static inline unsigned long flz(unsigned long word)
+static inline unsigned long fls(unsigned long word)
 {
-#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
-        return __ilog2(~word);
-#else
 #ifdef CONFIG_32BIT
-        int r = 31, s;
-        word = ~word;
+#ifdef CONFIG_CPU_MIPS32
+        __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
+
+        return 32 - word;
+#else
+        {
+        int r = 32, s;
+
+        if (word == 0)
+                return 0;
+
         s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s;
         s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s;
         s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s;
@@ -665,10 +671,23 @@ static inline unsigned long flz(unsigned long word)
         s = 1; if ((word & 0x80000000)) s = 0; r -= s;
 
         return r;
+        }
 #endif
+#endif /* CONFIG_32BIT */
+
 #ifdef CONFIG_64BIT
-        int r = 63, s;
-        word = ~word;
+#ifdef CONFIG_CPU_MIPS64
+
+        __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
+
+        return 64 - word;
+#else
+        {
+        int r = 64, s;
+
+        if (word == 0)
+                return 0;
+
         s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s;
         s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s;
         s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s;
@@ -677,24 +696,11 @@ static inline unsigned long flz(unsigned long word)
         s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s;
 
         return r;
+        }
 #endif
-#endif
+#endif /* CONFIG_64BIT */
 }
 
-/*
- * fls - find last bit set.
- * @word: The word to search
- *
- * Returns 1..SZLONG
- * Returns 0 if no bit exists
- */
-static inline unsigned long fls(unsigned long word)
-{
-        if (word == 0)
-                return 0;
-
-        return flz(~word) + 1;
-}
 
 #define fls64(x)   generic_fls64(x)
 
 /*
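
The rewritten fls() computes its result directly as width minus count-leading-zeros on CPUs that have clz/dclz, rather than going through the old flz(~word) detour; the shift-based fallback above is kept for older cores. The identity is easy to check with the equivalent compiler builtin (a host-side sketch, not kernel code):

    #include <assert.h>

    /* fls: 1-based index of the most significant set bit, 0 if none.
     * __builtin_clz mirrors the MIPS clz instruction, but the builtin
     * is undefined for 0 (the MIPS instruction returns 32), so the
     * zero case is tested explicitly here. */
    static unsigned long fls32(unsigned int word)
    {
            return word ? 32 - __builtin_clz(word) : 0;
    }

    int main(void)
    {
            assert(fls32(0) == 0);
            assert(fls32(1) == 1);                  /* bit 0 -> 1 */
            assert(fls32(0x80000000u) == 32);       /* bit 31 -> 32 */
            return 0;
    }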
diff --git a/include/asm-mips/byteorder.h b/include/asm-mips/byteorder.h
index d1fe9e5c62e..584f8128fff 100644
--- a/include/asm-mips/byteorder.h
+++ b/include/asm-mips/byteorder.h
@@ -8,10 +8,39 @@
 #ifndef _ASM_BYTEORDER_H
 #define _ASM_BYTEORDER_H
 
+#include <linux/config.h>
+#include <linux/compiler.h>
 #include <asm/types.h>
 
 #ifdef __GNUC__
 
+#ifdef CONFIG_CPU_MIPSR2
+
+static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
+{
+        __asm__(
+        "       wsbh    %0, %1                  \n"
+        : "=r" (x)
+        : "r" (x));
+
+        return x;
+}
+#define __arch__swab16(x)       ___arch__swab16(x)
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+{
+        __asm__(
+        "       wsbh    %0, %1                  \n"
+        "       rotr    %0, %0, 16              \n"
+        : "=r" (x)
+        : "r" (x));
+
+        return x;
+}
+#define __arch__swab32(x)       ___arch__swab32(x)
+
+#endif /* CONFIG_CPU_MIPSR2 */
+
 #if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 #  define __BYTEORDER_HAS_U64__
 #  define __SWAB_64_THRU_32__
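
The R2 swab32 works in two architectural steps: wsbh swaps the bytes within each 16-bit halfword, and rotating by 16 then exchanges the two halfwords, completing a full 32-bit byte reversal. The same two steps in portable C (a standalone sketch, not the kernel macro):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t swab32(uint32_t x)
    {
            /* "wsbh": swap the bytes inside each halfword */
            x = ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);
            /* "rotr %0, %0, 16": swap the two halfwords */
            return (x << 16) | (x >> 16);
    }

    int main(void)
    {
            assert(swab32(0x12345678u) == 0x78563412u);
            return 0;
    }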
\n\t" " \n\t" " .macro _ehb \n\t" diff --git a/include/asm-mips/interrupt.h b/include/asm-mips/interrupt.h index abdf54ee64c..774348734fa 100644 --- a/include/asm-mips/interrupt.h +++ b/include/asm-mips/interrupt.h @@ -47,6 +47,17 @@ static inline void local_irq_enable(void) * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs * no nops at all. */ +/* |