Diffstat (limited to 'arch/s390/include/asm/atomic.h')
-rw-r--r--	arch/s390/include/asm/atomic.h	248
1 file changed, 168 insertions(+), 80 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea11718..fa934fe080c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,79 +1,131 @@
-#ifndef __ARCH_S390_ATOMIC__
-#define __ARCH_S390_ATOMIC__
-
 /*
- * Copyright 1999,2009 IBM Corp.
+ * Copyright IBM Corp. 1999, 2009
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Denis Joseph Barrow,
  *	      Arnd Bergmann <arndb@de.ibm.com>,
  *
  * Atomic operations that C can't guarantee us.
  * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
+ * s390 uses 'Compare And Swap' for atomicity in SMP environment.
  *
  */
 
+#ifndef __ARCH_S390_ATOMIC__
+#define __ARCH_S390_ATOMIC__
+
 #include <linux/compiler.h>
 #include <linux/types.h>
-#include <asm/system.h>
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __CS_LOOP(ptr, op_val, op_string) ({			\
+#define __ATOMIC_NO_BARRIER	"\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR	"lao"
+#define __ATOMIC_AND	"lan"
+#define __ATOMIC_ADD	"laa"
+#define __ATOMIC_BARRIER "bcr	14,0\n"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
+({								\
+	int old_val;						\
+								\
+	typecheck(atomic_t *, ptr);				\
+	asm volatile(						\
+		__barrier					\
+		op_string "	%0,%2,%1\n"			\
+		__barrier					\
+		: "=d" (old_val), "+Q" ((ptr)->counter)		\
+		: "d" (op_val)					\
+		: "cc", "memory");				\
+	old_val;						\
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR	"or"
+#define __ATOMIC_AND	"nr"
+#define __ATOMIC_ADD	"ar"
+#define __ATOMIC_BARRIER "\n"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)	\
+({								\
 	int old_val, new_val;					\
+								\
+	typecheck(atomic_t *, ptr);				\
 	asm volatile(						\
 		"	l	%0,%2\n"			\
 		"0:	lr	%1,%0\n"			\
 		op_string "	%1,%3\n"			\
 		"	cs	%0,%1,%2\n"			\
 		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val),		\
-		  "=Q" (((atomic_t *)(ptr))->counter)		\
-		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+		: "d" (op_val)					\
 		: "cc", "memory");				\
-	new_val;						\
+	old_val;						\
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int atomic_read(const atomic_t *v)
 {
-	barrier();
-	return v->counter;
+	int c;
+
+	asm volatile(
+		"	l	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	st	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __CS_LOOP(v, i, "ar");
+	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
 }
-#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)			atomic_add_return(1, _v)
-#define atomic_inc_return(_v)		atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"asi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+		return;
+	}
+#endif
+	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
 }
-#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec(_v)			atomic_sub(1, _v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__CS_LOOP(v, ~mask, "nr");
+	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__CS_LOOP(v, mask, "or");
+	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -82,13 +134,13 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	asm volatile(
 		"	cs	%0,%2,%1"
-		: "+d" (old), "=Q" (v->counter)
-		: "d" (new), "Q" (v->counter)
+		: "+d" (old), "+Q" (v->counter)
+		: "d" (new)
 		: "cc", "memory");
 	return old;
 }
 
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -100,62 +152,111 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 			break;
 		c = old;
 	}
-	return c != u;
+	return c;
 }
 
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
 
 #define ATOMIC64_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define __CSG_LOOP(ptr, op_val, op_string) ({			\
+#define __ATOMIC64_NO_BARRIER	"\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR	"laog"
+#define __ATOMIC64_AND	"lang"
+#define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_BARRIER "bcr	14,0\n"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
+({								\
+	long long old_val;					\
+								\
+	typecheck(atomic64_t *, ptr);				\
+	asm volatile(						\
+		__barrier					\
+		op_string "	%0,%2,%1\n"			\
+		__barrier					\
+		: "=d" (old_val), "+Q" ((ptr)->counter)		\
+		: "d" (op_val)					\
+		: "cc", "memory");				\
+	old_val;						\
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR	"ogr"
+#define __ATOMIC64_AND	"ngr"
+#define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_BARRIER "\n"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)	\
+({								\
 	long long old_val, new_val;				\
+								\
+	typecheck(atomic64_t *, ptr);				\
 	asm volatile(						\
 		"	lg	%0,%2\n"			\
 		"0:	lgr	%1,%0\n"			\
 		op_string "	%1,%3\n"			\
 		"	csg	%0,%1,%2\n"			\
 		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val),		\
-		  "=Q" (((atomic_t *)(ptr))->counter)		\
-		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+		: "d" (op_val)					\
 		: "cc", "memory");				\
-	new_val;						\
+	old_val;						\
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline long long atomic64_read(const atomic64_t *v)
 {
-	barrier();
-	return v->counter;
+	long long c;
+
+	asm volatile(
+		"	lg	%0,%1\n"
+		: "=d" (c) : "Q" (v->counter));
+	return c;
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	v->counter = i;
-	barrier();
+	asm volatile(
+		"	stg	%1,%0\n"
+		: "=Q" (v->counter) : "d" (i));
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	return __CSG_LOOP(v, i, "agr");
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	return __CSG_LOOP(v, i, "sgr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"agsi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+		return;
+	}
+#endif
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-	__CSG_LOOP(v, ~mask, "ngr");
+	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-	__CSG_LOOP(v, mask, "ogr");
+	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -165,13 +266,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
 	asm volatile(
 		"	csg	%0,%2,%1"
-		: "+d" (old), "=Q" (v->counter)
-		: "d" (new), "Q" (v->counter)
+		: "+d" (old), "+Q" (v->counter)
+		: "d" (new)
 		: "cc", "memory");
 	return old;
 }
 
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
 
 #else /* CONFIG_64BIT */
 
@@ -207,8 +308,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
 		"	lm	%0,%N0,%1\n"
 		"0:	cds	%0,%2,%1\n"
 		"	jl	0b\n"
-		: "=&d" (rp_old), "=Q" (v->counter)
-		: "d" (rp_new), "Q" (v->counter)
+		: "=&d" (rp_old), "+Q" (v->counter)
+		: "d" (rp_new)
		: "cc");
 	return rp_old.pair;
 }
@@ -221,8 +322,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 
 	asm volatile(
 		"	cds	%0,%2,%1"
-		: "+&d" (rp_old), "=Q" (v->counter)
-		: "d" (rp_new), "Q" (v->counter)
+		: "+&d" (rp_old), "+Q" (v->counter)
+		: "d" (rp_new)
 		: "cc");
 	return rp_old.pair;
 }
@@ -239,17 +340,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return new;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long old, new;
-
-	do {
-		old = atomic64_read(v);
-		new = old - i;
-	} while (atomic64_cmpxchg(v, old, new) != old);
-	return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
 	long long old, new;
@@ -270,9 +360,14 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 	} while (atomic64_cmpxchg(v, old, new) != old);
 }
 
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+	atomic64_add_return(i, v);
+}
+
 #endif /* CONFIG_64BIT */
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
 	long long c, old;
 
@@ -280,7 +375,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	for (;;) {
 		if (unlikely(c == u))
 			break;
-		old = atomic64_cmpxchg(v, c, c + a);
+		old = atomic64_cmpxchg(v, c, c + i);
 		if (likely(old == c))
 			break;
 		c = old;
@@ -305,23 +400,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
-#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
 #define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
-#include <asm-generic/atomic-long.h>
-
 #endif /* __ARCH_S390_ATOMIC__ */
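The key behavioural change buried in this diff is that __ATOMIC_LOOP now evaluates to the *old* value of the counter (both the z196 laa-family instructions and the rewritten cs loop return it), which is why atomic_add_return() gains the trailing "+ i". The asi/agsi fast path applies only when the addend is a compile-time constant that fits the instructions' signed 8-bit immediate field, hence the (i > -129) && (i < 128) test. For readers less familiar with the pattern, here is a minimal user-space sketch of the compare-and-swap retry loop using C11 atomics; it is an illustration, not kernel code, and cas_loop_add is a hypothetical helper name:

#include <stdatomic.h>
#include <stdio.h>

/* User-space analogue of the non-z196 __ATOMIC_LOOP: retry a CAS until
 * it succeeds and hand back the value the counter held beforehand. */
static int cas_loop_add(atomic_int *counter, int op_val)
{
	int old_val = atomic_load(counter);	/* "l  %0,%2" */

	/* "0: lr %1,%0; ar %1,%3; cs %0,%1,%2; jl 0b" — on failure,
	 * old_val is refreshed with the current counter value. */
	while (!atomic_compare_exchange_weak(counter, &old_val,
					     old_val + op_val))
		;
	return old_val;	/* old value, matching the new macro */
}

int main(void)
{
	atomic_int v = 40;

	/* add_return = old value + operand, as in the patched header */
	printf("add_return: %d\n", cas_loop_add(&v, 2) + 2);	/* 42 */
	printf("counter:    %d\n", atomic_load(&v));		/* 42 */
	return 0;
}

Returning the old value lets one primitive back both atomic_add() (result discarded) and atomic_add_return() (old value plus operand), which is what allows the diff to express the sub/dec family as negated adds and drop the separate atomic_sub_return() implementations.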
