Diffstat (limited to 'arch/blackfin/include/asm/bitops.h')
-rw-r--r--   arch/blackfin/include/asm/bitops.h | 238
1 file changed, 49 insertions(+), 189 deletions(-)
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 21b036eadab..b298b654a26 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -1,26 +1,44 @@
-#ifndef _BLACKFIN_BITOPS_H
-#define _BLACKFIN_BITOPS_H
-
 /*
- * Copyright 1992, Linus Torvalds.
+ * Copyright 2004-2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
  */
 
+#ifndef _BLACKFIN_BITOPS_H
+#define _BLACKFIN_BITOPS_H
+
 #include <linux/compiler.h>
-#include <asm/byteorder.h>	/* swab32 */
 
-#ifdef __KERNEL__
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
 
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/const_hweight.h>
+#include <asm-generic/bitops/lock.h>
 
-#ifdef CONFIG_SMP
+#include <asm-generic/bitops/ext2-atomic.h>
 
+#include <asm/barrier.h>
+
+#ifndef CONFIG_SMP
+#include <linux/irqflags.h>
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#else
+
+#include <asm/byteorder.h>	/* swab32 */
 #include <linux/linkage.h>
 
 asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
@@ -79,202 +97,44 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return __raw_bit_test_toggle_asm(a, nr & 0x1f);
 }
 
-#else /* !CONFIG_SMP */
-
-#include <asm/system.h>	/* save_flags */
-
-static inline void set_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	*a |= mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline void clear_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-	unsigned long flags;
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	*a &= ~mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline void change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, flags;
-	unsigned long *ADDR = (unsigned long *)addr;
-
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	local_irq_save_hw(flags);
-	*ADDR ^= mask;
-	local_irq_restore_hw(flags);
-}
-
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	local_irq_restore_hw(flags);
-
-	return retval;
-}
-
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	local_irq_restore_hw(flags);
-
-	return retval;
-}
-
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-	unsigned long flags;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	local_irq_save_hw(flags);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	local_irq_restore_hw(flags);
-	return retval;
-}
+#define test_bit __skip_test_bit
+#include <asm-generic/bitops/non-atomic.h>
+#undef test_bit
 
 #endif /* CONFIG_SMP */
 
+/* Needs to be after test_bit and friends */
+#include <asm-generic/bitops/le.h>
+
 /*
- * clear_bit() doesn't provide any barrier for the compiler.
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
  */
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a |= mask;
-}
-
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
-	int *a = (int *)addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	*a &= ~mask;
-}
-
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
-	int mask;
-	unsigned long *ADDR = (unsigned long *)addr;
-	ADDR += nr >> 5;
-	mask = 1 << (nr & 31);
-	*ADDR ^= mask;
-}
-
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline unsigned int __arch_hweight32(unsigned int w)
 {
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a |= mask;
-	return retval;
-}
+	unsigned int res;
 
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a &= ~mask;
-	return retval;
+	__asm__ ("%0.l = ONES %1;"
+		"%0 = %0.l (Z);"
+		: "=d" (res) : "d" (w));
+	return res;
 }
 
-static inline int __test_and_change_bit(int nr,
-					volatile unsigned long *addr)
+static inline unsigned int __arch_hweight64(__u64 w)
 {
-	int mask, retval;
-	volatile unsigned int *a = (volatile unsigned int *)addr;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	retval = (mask & *a) != 0;
-	*a ^= mask;
-	return retval;
+	return __arch_hweight32((unsigned int)(w >> 32)) +
+	       __arch_hweight32((unsigned int)w);
 }
 
-static inline int __test_bit(int nr, const void *addr)
+static inline unsigned int __arch_hweight16(unsigned int w)
 {
-	int *a = (int *)addr;
-	int mask;
-
-	a += nr >> 5;
-	mask = 1 << (nr & 0x1f);
-	return ((mask & *a) != 0);
+	return __arch_hweight32(w & 0xffff);
 }
 
-#ifndef CONFIG_SMP
-/*
- * This routine doesn't need irq save and restore ops in UP
- * context.
- */
-static inline int test_bit(int nr, const void *addr)
+static inline unsigned int __arch_hweight8(unsigned int w)
 {
-	return __test_bit(nr, addr);
+	return __arch_hweight32(w & 0xff);
 }
-#endif
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-#include <asm-generic/bitops/ext2-atomic.h>
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
 
 #endif /* _BLACKFIN_BITOPS_H */
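
For reference, the new __arch_hweight32() above counts set bits with the Blackfin ONES instruction, and the 64/16/8-bit variants reuse that 32-bit count by splitting or masking the argument first. The sketch below is not part of the patch: it stands in for the ONES instruction with a portable bit-clearing loop so the same decomposition can be checked on any host, and the file name hweight_demo.c and the *_ref helper names are made up for illustration.

/* hweight_demo.c - illustrative only, not part of the kernel change.
 * Replaces the Blackfin ONES instruction with a portable popcount so
 * the decomposition used by __arch_hweight64/16/8 can be verified
 * with a normal host compiler.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of the asm in __arch_hweight32(): count set bits. */
static unsigned int hweight32_ref(uint32_t w)
{
	unsigned int count = 0;

	while (w) {
		w &= w - 1;	/* clear the lowest set bit */
		count++;
	}
	return count;
}

/* Mirrors how the patch derives the wider and narrower variants. */
static unsigned int hweight64_ref(uint64_t w)
{
	return hweight32_ref((uint32_t)(w >> 32)) + hweight32_ref((uint32_t)w);
}

static unsigned int hweight16_ref(uint32_t w)
{
	return hweight32_ref(w & 0xffff);
}

static unsigned int hweight8_ref(uint32_t w)
{
	return hweight32_ref(w & 0xff);
}

int main(void)
{
	assert(hweight32_ref(0) == 0);
	assert(hweight32_ref(0xffffffffu) == 32);
	assert(hweight64_ref(0xf0f0f0f0f0f0f0f0ull) == 32);
	assert(hweight16_ref(0x10003) == 2);	/* bits above 15 are masked off */
	assert(hweight8_ref(0x1ff) == 8);	/* bits above 7 are masked off */
	printf("hweight checks passed\n");
	return 0;
}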
