Diffstat (limited to 'arch/arm64/include/asm/atomic.h')
-rw-r--r--	arch/arm64/include/asm/atomic.h	74
1 file changed, 31 insertions, 43 deletions
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 83636446857..65f1569ac96 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	int result;
 
 	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -112,34 +112,23 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	unsigned long tmp;
 	int oldval;
 
+	smp_mb();
+
 	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, %2\n"
-"	bic	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
-	: "Ir" (mask)
-	: "cc");
-}
-
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -163,17 +152,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 /*
  * 64-bit atomic operations.
  */
 #define ATOMIC64_INIT(i) { (i) }
 
-#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
+#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
 #define atomic64_set(v,i)	(((v)->counter) = (i))
 
 static inline void atomic64_add(u64 i, atomic64_t *v)
@@ -187,8 +171,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -197,14 +180,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -219,8 +203,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -229,14 +212,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
 	return result;
 }
 
@@ -245,17 +229,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 	long oldval;
 	unsigned long res;
 
+	smp_mb();
+
 	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
 	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
 	return oldval;
 }
 
@@ -267,11 +254,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
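
The hunks above follow one pattern: the value-returning routines switch the load half of the LL/SC loop from ldaxr to a plain ldxr and add an explicit smp_mb() after the loop (and, for the cmpxchg variants, before it as well), so the whole operation acts as a full barrier instead of relying on acquire/release ordering alone; the non-returning add/sub variants simply drop a "cc" clobber that the flag-free add/sub/cbnz sequences never needed. For reference only, here is roughly how atomic_add_return() reads with these hunks applied; it is reassembled from the diff, and the "unsigned long tmp;" declaration, which falls outside the visible hunk context, is assumed.

/*
 * Illustrative sketch only: atomic_add_return() as reassembled from the
 * hunks above.  The "unsigned long tmp;" declaration is not visible in
 * the hunk context and is assumed here.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"		/* plain load-exclusive, no acquire */
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"		/* store-release exclusive */
"	cbnz	%w1, 1b"		/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();			/* explicit barrier completes the full-barrier semantics */
	return result;
}

The cmpxchg variants keep their "cc" clobber because cmp does write the condition flags, and they bracket the loop with smp_mb() on both sides so a successful compare-and-swap orders against accesses before and after it; they can presumably drop the "memory" clobber because the surrounding smp_mb() calls already act as compiler barriers.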
