Diffstat (limited to 'arch/mips/include/asm/atomic.h')
-rw-r--r--	arch/mips/include/asm/atomic.h	322
1 file changed, 144 insertions(+), 178 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index dd75d673447..37b2befe651 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -18,10 +18,10 @@
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
+#include <asm/cmpxchg.h>
#include <asm/war.h>
-#include <asm/system.h>
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i) { (i) }
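
The ATOMIC_INIT change is whitespace-only (tab alignment). Since it heads the file, a reminder of its role: it is the static initializer counterpart to the runtime atomic_set(). A minimal usage sketch (the variable name is hypothetical):

	static atomic_t active_users = ATOMIC_INIT(0);	/* starts at 0 */
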
/*
* atomic_read - read atomic variable
@@ -29,7 +29,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) (*(volatile int *)&(v)->counter)
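
The cast through volatile forces the compiler to emit a real load on every atomic_read() instead of reusing a value cached in a register, which matters for code that polls a counter in a loop; it is the same idiom as ACCESS_ONCE(). A userspace sketch of the failure mode it prevents (names are illustrative, not kernel code):

	struct my_atomic { int counter; };	/* stand-in for atomic_t */

	#define my_atomic_read(v)  (*(volatile int *)&(v)->counter)

	static void wait_for_nonzero(struct my_atomic *v)
	{
		/* Without the volatile cast the compiler may hoist the
		 * load out of the loop and spin forever on a stale copy;
		 * the cast forces a fresh load on every iteration. */
		while (my_atomic_read(v) == 0)
			;
	}
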
/*
* atomic_set - set atomic variable
@@ -53,29 +53,27 @@ static __inline__ void atomic_add(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " ll %0, %1 # atomic_add \n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
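
This hunk shows the pattern the whole patch applies. Previously a failed sc took beqz to an out-of-line .subsection 2 stub that jumped back to the ll; now the asm body is a straight-line ll/addu/sc and the retry lives in C as do { } while (unlikely(!temp)), relying on sc writing 1 to its source register on success and 0 on failure. The "=m" output plus "m" input constraint pair also collapses into a single read-write "+m", telling GCC the asm both reads and writes v->counter. A userspace analogue of the retry structure, assuming GCC's __atomic builtins (a sketch of the semantics, not the MIPS code):

	#include <stdbool.h>

	static void my_atomic_add(int i, int *counter)
	{
		int old;

		do {
			/* like "ll": read the current value */
			old = __atomic_load_n(counter, __ATOMIC_RELAXED);
			/* like "sc": store old + i only if *counter is
			 * still old; on failure, loop and retry */
		} while (!__atomic_compare_exchange_n(counter, &old, old + i,
						      false, __ATOMIC_RELAXED,
						      __ATOMIC_RELAXED));
	}
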
@@ -98,29 +96,27 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " ll %0, %1 # atomic_sub \n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -137,39 +133,36 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
{
int result;
- smp_llsc_mb();
+ smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_add_return \n"
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 2f \n"
- " addu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
+ } while (unlikely(!result));
+
+ result = temp + i;
} else {
unsigned long flags;
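
Two separate changes meet in this hunk. smp_llsc_mb() at the top becomes smp_mb__before_llsc(), the macro for ordering everything before the ll, so the value-returning operations keep full-barrier semantics, unlike plain atomic_add(). And because sc overwrites %0 with its success flag, the converted loop can no longer produce the new value in the branch delay slot; it is recomputed in C as result = temp + i; from the ll'd old value, which in turn lets the "memory" clobber go. The atomic_sub_return hunk that follows is the same conversion, though as the diff shows it leaves the older "=m"/"m"/"memory" constraints on its R10000 branch and adds the C recomputation there as well. In userspace terms the split looks like this (a sketch, not the kernel's implementation):

	/* The primitive hands back the old value; the new value is
	 * reconstructed in C, mirroring "result = temp + i;". */
	static int my_atomic_add_return(int i, int *counter)
	{
		int old = __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
		return old + i;
	}
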
@@ -189,13 +182,13 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
int result;
- smp_llsc_mb();
+ smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
@@ -205,23 +198,23 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+
+ result = temp - i;
} else if (kernel_uses_llsc) {
int temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 2f \n"
- " subu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " ll %1, %2 # atomic_sub_return \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
+ } while (unlikely(!result));
+
+ result = temp - i;
} else {
unsigned long flags;
@@ -249,13 +242,13 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
int result;
- smp_llsc_mb();
+ smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -266,30 +259,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
int temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
- " beqz %0, 2f \n"
+ " beqz %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else {
unsigned long flags;
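
atomic_sub_if_positive() cannot use the do-while conversion, because a negative result must return without ever executing sc; the retry therefore stays inside the asm, and the only change is that a failed sc now branches straight back to label 1b (with the subu kept in the noreorder delay slot to regenerate the result) instead of detouring through a .subsection 2 stub. The R10000_LLSC_WAR path keeps beqzl, the branch-likely form used to work around an R10000 erratum in ll/sc loops. The operation's semantics as a userspace CAS sketch (illustrative only):

	#include <stdbool.h>

	/* Subtract i only if the result stays >= 0; always return the
	 * computed difference, even when it is rejected. */
	static int my_sub_if_positive(int i, int *counter)
	{
		int old, new;

		do {
			old = __atomic_load_n(counter, __ATOMIC_RELAXED);
			new = old - i;
			if (new < 0)		/* like "bltz %0, 1f" */
				return new;	/* no store performed */
		} while (!__atomic_compare_exchange_n(counter, &old, new,
						      false, __ATOMIC_SEQ_CST,
						      __ATOMIC_RELAXED));
		return new;
	}
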
@@ -310,15 +299,15 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -330,9 +319,8 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
- return c != (u);
+ return c;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
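
The rename and changed return value track the consolidation of atomic_add_unless() into generic code: the arch-level helper now returns the old value of @v, and the common wrapper performs the != u test, which is also why the per-arch atomic_inc_not_zero() define can be dropped. The body itself is an ordinary cmpxchg loop. The expected calling convention, sketched (this mirrors the generic wrapper, not code in this file):

	static inline int my_atomic_add_unless(atomic_t *v, int a, int u)
	{
		/* old value == u  =>  nothing was added  =>  return 0 */
		return __atomic_add_unless(v, a, u) != u;
	}
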
@@ -410,7 +398,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer of type atomic64_t
*
*/
-#define atomic64_read(v) ((v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
/*
* atomic64_set - set atomic variable
@@ -432,29 +420,27 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %0, %1 # atomic64_add \n"
- " addu %0, %2 \n"
+ " daddu %0, %2 \n"
" scd %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %1 # atomic64_add \n"
- " addu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " lld %0, %1 # atomic64_add \n"
+ " daddu %0, %2 \n"
+ " scd %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
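
The addu -> daddu (and, below, subu -> dsubu) substitutions are correctness fixes rather than cleanups: on MIPS64, addu adds the low 32 bits and sign-extends the result into the destination register, so a 64-bit counter would be silently corrupted once it crossed 2^31; daddu performs the full 64-bit add. A userspace illustration of exactly that difference (a sketch; the casts emulate addu's 32-bit wrap and sign-extension):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t counter = INT64_C(0x7fffffff);	/* 2^31 - 1 */

		/* addu-like: 32-bit add, result sign-extended to 64 bits */
		int64_t addu_like  = (int32_t)((uint32_t)counter + 1);
		/* daddu-like: true 64-bit add */
		int64_t daddu_like = counter + 1;

		printf("%lld %lld\n", (long long)addu_like,	/* -2147483648 */
				      (long long)daddu_like);	/*  2147483648 */
		return 0;
	}
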
@@ -477,29 +463,27 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %0, %1 # atomic64_sub \n"
- " subu %0, %2 \n"
+ " dsubu %0, %2 \n"
" scd %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %0, %1 # atomic64_sub \n"
- " subu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 2f \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " lld %0, %1 # atomic64_sub \n"
+ " dsubu %0, %2 \n"
+ " scd %0, %1 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
+ } while (unlikely(!temp));
} else {
unsigned long flags;
@@ -516,39 +500,37 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
long result;
- smp_llsc_mb();
+ smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_add_return \n"
- " addu %0, %1, %3 \n"
+ " daddu %0, %1, %3 \n"
" scd %0, %2 \n"
" beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
+ " daddu %0, %1, %3 \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %1, %2 # atomic64_add_return \n"
- " addu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 2f \n"
- " addu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " lld %1, %2 # atomic64_add_return \n"
+ " daddu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp + i;
} else {
unsigned long flags;
@@ -568,18 +550,18 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
long result;
- smp_llsc_mb();
+ smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_return \n"
- " subu %0, %1, %3 \n"
+ " dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
" beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
+ " dsubu %0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
@@ -587,20 +569,19 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
} else if (kernel_uses_llsc) {
long temp;
- __asm__ __volatile__(
- " .set mips3 \n"
- "1: lld %1, %2 # atomic64_sub_return \n"
- " subu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 2f \n"
- " subu %0, %1, %3 \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
- " .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ do {
+ __asm__ __volatile__(
+ " .set arch=r4000 \n"
+ " lld %1, %2 # atomic64_sub_return \n"
+ " dsubu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } while (unlikely(!result));
+
+ result = temp - i;
} else {
unsigned long flags;
@@ -628,13 +609,13 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
long result;
- smp_llsc_mb();
+ smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -652,23 +633,19 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set arch=r4000 \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
" .set noreorder \n"
- " beqz %0, 2f \n"
+ " beqz %0, 1b \n"
" dsubu %0, %1, %3 \n"
" .set reorder \n"
- " .subsection 2 \n"
- "2: b 1b \n"
- " .previous \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "Ir" (i));
} else {
unsigned long flags;
@@ -696,7 +673,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
@@ -784,15 +761,4 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#endif /* CONFIG_64BIT */
-/*
- * atomic*_return operations are serializing but not the non-*_return
- * versions.
- */
-#define smp_mb__before_atomic_dec() smp_llsc_mb()
-#define smp_mb__after_atomic_dec() smp_llsc_mb()
-#define smp_mb__before_atomic_inc() smp_llsc_mb()
-#define smp_mb__after_atomic_inc() smp_llsc_mb()
-
-#include <asm-generic/atomic-long.h>
-
#endif /* _ASM_ATOMIC_H */
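
The tail of the patch drops the per-arch smp_mb__before/after_atomic_{dec,inc}() definitions and the <asm-generic/atomic-long.h> include; by the end of this range both are presumably supplied by common headers, with the barrier macros consolidated behind the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair (which MIPS maps onto smp_mb__before_llsc()/smp_llsc_mb() in <asm/barrier.h>). Callers keep the same shape (a sketch; the counter name is hypothetical):

	/* Order prior stores before the decrement, using the generic
	 * macro that replaces the removed per-arch ones. */
	smp_mb__before_atomic();
	atomic_dec(&pending);
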