Diffstat (limited to 'arch/mips/include/asm/atomic.h')
-rw-r--r--	arch/mips/include/asm/atomic.h	| 51
1 file changed, 21 insertions, 30 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 08b607969a1..37b2befe651 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -1,5 +1,5 @@
 /*
- * Atomic operations that C can't guarantee us.  Useful for
+ * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  *
  * But use these as seldom as possible since they are much more slower
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%0, %1		# atomic_add		\n"
 		"	addu	%0, %2					\n"
 		"	sc	%0, %1					\n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%0, %1		# atomic_add	\n"
 			"	addu	%0, %2				\n"
 			"	sc	%0, %1				\n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%0, %1		# atomic_sub		\n"
 		"	subu	%0, %2					\n"
 		"	sc	%0, %1					\n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%0, %1		# atomic_sub	\n"
 			"	subu	%0, %2				\n"
 			"	sc	%0, %1				\n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%1, %2	# atomic_add_return	\n"
 			"	addu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%1, %2	# atomic_sub_return	\n"
 			"	subu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%0, %1		# atomic64_add		\n"
 		"	daddu	%0, %2					\n"
 		"	scd	%0, %1					\n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%0, %1	# atomic64_add		\n"
 			"	daddu	%0, %2				\n"
 			"	scd	%0, %1				\n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%0, %1		# atomic64_sub		\n"
 		"	dsubu	%0, %2					\n"
 		"	scd	%0, %1					\n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%0, %1	# atomic64_sub		\n"
 			"	dsubu	%0, %2				\n"
 			"	scd	%0, %1				\n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	daddu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%1, %2	# atomic64_add_return	\n"
 			"	daddu	%0, %1, %3			\n"
 			"	scd	%0, %2				\n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%1, %2	# atomic64_sub_return	\n"
 			"	dsubu	%0, %1, %3			\n"
 			"	scd	%0, %2				\n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -761,13 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #endif /* CONFIG_64BIT */
 
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
-#define smp_mb__after_atomic_inc()	smp_llsc_mb()
-
 #endif /* _ASM_ATOMIC_H */
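Every hunk above makes the same one-line substitution: the `.set mips3` directive, which switches the assembler to the full MIPS III ISA for the duration of the block, becomes `.set arch=r4000`, which only tells the assembler to accept R4000-class instructions (here the ll/sc and lld/scd load-linked/store-conditional pairs) without otherwise changing the selected ISA mode. The sketch below shows the LL/SC retry loop that all of these routines share. It is a minimal illustration modeled on the kernel code in this diff, not the kernel's exact implementation: the function name is made up, and the .set push/.set pop bracketing is used here as a conservative way to keep the directive from leaking past the asm block.

/*
 * Minimal sketch of the LL/SC retry loop used throughout this file.
 * Compiles only with a MIPS-targeted GCC; sketch_atomic_add() is a
 * hypothetical stand-in for the kernel's atomic_add().
 */
static inline void sketch_atomic_add(int i, int *counter)
{
	int temp;

	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	arch=r4000			\n"
		"	ll	%0, %1	# load-linked read	\n"
		"	addu	%0, %2	# temp += i		\n"
		"	sc	%0, %1	# store-conditional	\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*counter)
		: "Ir" (i));
	} while (!temp);	/* sc left 0 in temp: lost the race, retry */
}

sc succeeds, writing 1 back into its source register, only if no other CPU wrote the location since the matching ll; on failure the loop simply retries, which is why no lock is needed. The final hunk removes the MIPS-private smp_mb__{before,after}_atomic_{dec,inc}() macros along with their comment; this is consistent with the kernel-wide move to the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, whose MIPS definitions (in terms of the same smp_mb__before_llsc()/smp_llsc_mb() barriers) live outside this file.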
