Diffstat (limited to 'arch/parisc/include/asm/atomic.h')
-rw-r--r--  arch/parisc/include/asm/atomic.h | 163
1 file changed, 35 insertions, 128 deletions
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index f81955934ae..0be2db2c7d4 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -6,7 +6,8 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
-#include <asm/system.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -49,112 +50,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 #  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
 #endif
 
-/* This should get optimized out since it's never called.
-** Or get a link error if xchg is used "wrong".
-*/
-extern void __xchg_called_with_bad_pointer(void);
-
-
-/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __xchg8(char, char *);
-extern unsigned long __xchg32(int, int *);
-#ifdef CONFIG_64BIT
-extern unsigned long __xchg64(unsigned long, unsigned long *);
-#endif
-
-/* optimizer better get rid of switch since size is a constant */
-static __inline__ unsigned long
-__xchg(unsigned long x, __volatile__ void * ptr, int size)
-{
-        switch(size) {
-#ifdef CONFIG_64BIT
-        case 8: return __xchg64(x,(unsigned long *) ptr);
-#endif
-        case 4: return __xchg32((int) x, (int *) ptr);
-        case 1: return __xchg8((char) x, (char *) ptr);
-        }
-        __xchg_called_with_bad_pointer();
-        return x;
-}
-
-
-/*
-** REVISIT - Abandoned use of LDCW in xchg() for now:
-** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-**   o and while we are at it, could CONFIG_64BIT code use LDCD too?
-**
-**   if (__builtin_constant_p(x) && (x == NULL))
-**           if (((unsigned long)p & 0xf) == 0)
-**                   return __ldcw(p);
-*/
-#define xchg(ptr,x) \
-        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
-extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
-        switch(size) {
-#ifdef CONFIG_64BIT
-        case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-        case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
-        }
-        __cmpxchg_called_with_bad_pointer();
-        return old;
-}
-
-#define cmpxchg(ptr,o,n)                                                  \
-        ({                                                                \
-                __typeof__(*(ptr)) _o_ = (o);                             \
-                __typeof__(*(ptr)) _n_ = (n);                             \
-                (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-                        (unsigned long)_n_, sizeof(*(ptr)));              \
-        })
-
-#include <asm-generic/cmpxchg-local.h>
-
-static inline unsigned long __cmpxchg_local(volatile void *ptr,
-                                unsigned long old,
-                                unsigned long new_, int size)
-{
-        switch (size) {
-#ifdef CONFIG_64BIT
-        case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
-#endif
-        case 4: return __cmpxchg_u32(ptr, old, new_);
-        default:
-                return __cmpxchg_local_generic(ptr, old, new_, size);
-        }
-}
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)                                        \
-        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
-                        (unsigned long)(n), sizeof(*(ptr))))
-#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)                      \
-        ({                                              \
-                BUILD_BUG_ON(sizeof(*(ptr)) != 8);      \
-                cmpxchg_local((ptr), (o), (n));         \
-        })
-#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#endif
-
 /*
  * Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees inconsistent values.
@@ -197,15 +92,15 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
         int c, old;
         c = atomic_read(v);
@@ -217,13 +112,12 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
                         break;
                 c = old;
         }
-        return c != (u);
+        return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
-#define atomic_sub(i,v) ((void)(__atomic_add_return(-(i),(v))))
+#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v))))
+#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int) (i)),(v))))
 #define atomic_inc(v)   ((void)(__atomic_add_return(   1,(v))))
 #define atomic_dec(v)   ((void)(__atomic_add_return(  -1,(v))))
 
@@ -248,21 +142,16 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_sub_and_test(i,v)        (atomic_sub_return((i),(v)) == 0)
 
-#define ATOMIC_INIT(i)  ((atomic_t) { (i) })
-
-#define smp_mb__before_atomic_dec()     smp_mb()
-#define smp_mb__after_atomic_dec()      smp_mb()
-#define smp_mb__before_atomic_inc()     smp_mb()
-#define smp_mb__after_atomic_inc()      smp_mb()
+#define ATOMIC_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
 
-static __inline__ int
+static __inline__ s64
 __atomic64_add_return(s64 i, atomic64_t *v)
 {
-        int ret;
+        s64 ret;
         unsigned long flags;
 
         _atomic_spin_lock_irqsave(v, flags);
@@ -317,7 +206,7 @@ atomic64_read(const atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
@@ -336,12 +225,30 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#else /* CONFIG_64BIT */
-
-#include <asm-generic/atomic64.h>
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+        long c, old, dec;
+        c = atomic64_read(v);
+        for (;;) {
+                dec = c - 1;
+                if (unlikely(dec < 0))
+                        break;
+                old = atomic64_cmpxchg((v), c, dec);
+                if (likely(old == c))
+                        break;
+                c = old;
+        }
+        return dec;
+}
 
 #endif /* !CONFIG_64BIT */
 
-#include <asm-generic/atomic-long.h>
-
 #endif /* _ASM_PARISC_ATOMIC_H_ */
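One of the changes above alters a calling convention rather than behaviour: __atomic_add_unless() now returns the value observed before the add instead of a success flag, which is why the local atomic_inc_not_zero() define can be dropped from this header. The architecture-independent layer rebuilds the boolean result from the returned old value. The sketch below is only an illustration of that relationship; it assumes the generic-wrapper shape used by <linux/atomic.h> in this era and is not copied from the patched header.

/*
 * Illustrative sketch (assumption, not part of this patch): the generic
 * layer can recover the old "did the add happen?" semantics because the
 * arch helper now reports the value it saw before adding.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        /* The add was performed iff the observed value was not u. */
        return __atomic_add_unless(v, a, u) != u;
}

#define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)

The new atomic64_dec_if_positive() follows the same cmpxchg retry loop as atomic64_add_unless(): it returns the decremented value on success, and a negative value (the old value minus one) when the counter was already at or below zero, so callers typically just test whether the result is negative.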
