Diffstat (limited to 'arch/powerpc/include/asm/spinlock.h')
-rw-r--r--  arch/powerpc/include/asm/spinlock.h  55
1 file changed, 35 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 764094cff68..35aa339410b 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -23,17 +23,21 @@
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
-#define arch_spin_is_locked(x) ((x)->slock != 0)
+#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
#define LOCK_TOKEN 1
#endif
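
The token above is what a locked slock word contains, so code elsewhere in the tree (such as __spin_yield(), declared further down in this header) can recover the holding CPU from the lock value. A minimal sketch of that decoding, assuming the CPU number sits in the low bits as the 0x800000yy comment describes; the helper name is illustrative and not part of this patch:

/* Hedged sketch: recover the holder CPU from a locked slock value,
 * assuming the 0x800000yy encoding noted in the comment above.
 * Illustrative only; not part of the patch. */
static inline unsigned int spin_holder_cpu(unsigned int lock_value)
{
	return lock_value & 0xffff;	/* low bits carry the CPU index */
}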
@@ -50,6 +54,16 @@
#define SYNC_IO
#endif
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(*lock);
+}
+
/*
* This returns the old value in the lock, so we succeeded
* in getting the lock if the return value is 0.
@@ -60,13 +74,14 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
token = LOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2\n\
+"1: " PPC_LWARX(%0,0,%2,1) "\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
- bne- 1b\n\
- isync\n\
-2:" : "=&r" (tmp)
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:"
+ : "=&r" (tmp)
: "r" (token), "r" (&lock->slock)
: "cr0", "memory");
@@ -93,12 +108,12 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
* value.
*/
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR || ISERIES */
+#else /* SPLPAR */
#define __spin_yield(x) barrier()
#define __rw_yield(x) barrier()
#define SHARED_PROCESSOR 0
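
SHARED_PROCESSOR now goes through lppaca_shared_proc() instead of dereferencing the lppaca field directly, and the iSeries case disappears along with CONFIG_PPC_ISERIES. For context, a hedged sketch of the pattern the lock slow path uses with these symbols; the real arch_spin_lock() lives outside the hunks shown, so the body below is an approximation, not a quote:

/* Hedged sketch of the lock slow path: spin at low thread priority and,
 * on a shared-processor LPAR, direct a hypervisor yield at the holder. */
static inline void spin_lock_sketch(arch_spinlock_t *lock)
{
	while (__arch_spin_trylock(lock) != 0) {
		do {
			HMT_low();			/* drop SMT priority */
			if (SHARED_PROCESSOR)
				__spin_yield(lock);	/* cede to the holder */
		} while (lock->slock != 0);
		HMT_medium();				/* restore priority */
	}
}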
@@ -144,7 +159,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
SYNC_IO;
__asm__ __volatile__("# arch_spin_unlock\n\t"
- LWSYNC_ON_SMP: : :"memory");
+ PPC_RELEASE_BARRIER: : :"memory");
lock->slock = 0;
}
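
PPC_RELEASE_BARRIER takes over from LWSYNC_ON_SMP here: the barrier before the plain store of 0 orders everything done inside the critical section before the store that publishes the unlock, pairing with PPC_ACQUIRE_BARRIER on the lock side (arch_write_unlock() at the end of this diff uses the same pattern). In C11 terms the unlock is a release store; a minimal sketch of that equivalence, for illustration only since the kernel does not use <stdatomic.h> here:

#include <stdatomic.h>

/* Hedged sketch: the barrier-then-plain-store sequence above plays the
 * role of this single release-ordered store.  Ordering illustration only. */
static inline void spin_unlock_sketch(_Atomic unsigned int *slock)
{
	atomic_store_explicit(slock, 0, memory_order_release);
}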
@@ -186,15 +201,15 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%1\n"
+"1: " PPC_LWARX(%0,0,%1,1) "\n"
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
- bne- 1b\n\
- isync\n\
-2:" : "=&r" (tmp)
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:" : "=&r" (tmp)
: "r" (&rw->lock)
: "cr0", "xer", "memory");
@@ -211,14 +226,14 @@ static inline long __arch_write_trylock(arch_rwlock_t *rw)
token = WRLOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2\n\
+"1: " PPC_LWARX(%0,0,%2,1) "\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
PPC405_ERR77(0,%1)
" stwcx. %1,0,%2\n\
- bne- 1b\n\
- isync\n\
-2:" : "=&r" (tmp)
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:" : "=&r" (tmp)
: "r" (token), "r" (&rw->lock)
: "cr0", "memory");
@@ -269,7 +284,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
__asm__ __volatile__(
"# read_unlock\n\t"
- LWSYNC_ON_SMP
+ PPC_RELEASE_BARRIER
"1: lwarx %0,0,%1\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
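
Read-unlock is a release-ordered decrement of the reader count: PPC_RELEASE_BARRIER orders the critical section before the decrement, and the lwarx/stwcx. loop (whose tail is cut off by the diff context here) makes the decrement atomic. The same thing expressed with a compiler builtin, as an illustration only:

/* Hedged sketch of arch_read_unlock(): atomically drop one reader with
 * release ordering.  Illustration only. */
static inline void read_unlock_sketch(int *lock)
{
	__atomic_fetch_sub(lock, 1, __ATOMIC_RELEASE);
}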
@@ -283,7 +298,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("# write_unlock\n\t"
- LWSYNC_ON_SMP: : :"memory");
+ PPC_RELEASE_BARRIER: : :"memory");
rw->lock = 0;
}