Diffstat (limited to 'arch/arm/include/asm/spinlock.h')
 arch/arm/include/asm/spinlock.h | 149
 1 file changed, 72 insertions(+), 77 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index b4ca707d0a6..ac4bfae2670 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
-#include <asm/processor.h>
+#include <linux/prefetch.h>
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
-#define ALT_SMP(smp, up) \
- "9998: " smp "\n" \
- " .pushsection \".alt.smp.init\", \"a\"\n" \
- " .long 9998b\n" \
- " " up "\n" \
- " .popsection\n"
-
#ifdef CONFIG_THUMB2_KERNEL
-#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
@@ -31,31 +23,23 @@
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
-#define WFE(cond) ALT_SMP( \
+#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
-#define SEV ALT_SMP("sev", "nop")
-#define WFE(cond) ALT_SMP("wfe" cond, "nop")
+#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
static inline void dsb_sev(void)
{
-#if __LINUX_ARM_ARCH__ >= 7
- __asm__ __volatile__ (
- "dsb\n"
- SEV
- );
-#else
- __asm__ __volatile__ (
- "mcr p15, 0, %0, c7, c10, 4\n"
- SEV
- : : "r" (0)
- );
-#endif
+
+ dsb(ishst);
+ __asm__(SEV);
}
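For reference: __ALT_SMP_ASM() and WASM() are not shown in this patch. __ALT_SMP_ASM() presumably expands exactly like the ALT_SMP() macro deleted above, emitting the SMP instruction inline and recording its address in the .alt.smp.init section so the UP alternative can be patched in on uniprocessor kernels, while WASM() is assumed to stringify an instruction and force the wide encoding under Thumb-2. A sketch of that assumption:

#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define WASM(instr)	#instr ".w"	/* force the 32-bit (wide) encoding */
#else
#define WASM(instr)	#instr
#endif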
/*
@@ -77,6 +61,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
u32 newval;
arch_spinlock_t lockval;
+ prefetchw(&lock->slock);
__asm__ __volatile__(
"1: ldrex %0, [%3]\n"
" add %1, %0, %4\n"
@@ -97,19 +82,23 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- unsigned long tmp;
+ unsigned long contended, res;
u32 slock;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" subs %1, %0, %0, ror #16\n"
-" addeq %0, %0, %3\n"
-" strexeq %1, %0, [%2]"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
- : "cc");
-
- if (tmp == 0) {
+ prefetchw(&lock->slock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%3]\n"
+ " mov %2, #0\n"
+ " subs %1, %0, %0, ror #16\n"
+ " addeq %0, %0, %4\n"
+ " strexeq %2, %0, [%3]"
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
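The rewritten trylock keeps two results: "contended" records whether the ticket halves matched, while "res" records only the strex outcome, so the loop retries solely on a spurious exclusive-store failure and never spins on a genuinely contended lock. At C level the body is roughly equivalent to the sketch below (ticket_trylock_sketch is an illustrative name, a GCC __sync CAS stands in for ldrex/strexeq, and for simplicity a lost CAS race reports failure instead of re-reading):

/* Illustrative only: C-level view of the ldrex/strex loop above, assuming
 * the ticket layout of arch_spinlock_t (16-bit owner/next, TICKET_SHIFT 16). */
static inline int ticket_trylock_sketch(arch_spinlock_t *lock)
{
	arch_spinlock_t old;

	old.slock = ACCESS_ONCE(lock->slock);
	if (old.tickets.next != old.tickets.owner)
		return 0;				/* contended */

	/* take a ticket: adding 1 << TICKET_SHIFT bumps tickets.next */
	if (__sync_bool_compare_and_swap(&lock->slock, old.slock,
					 old.slock + (1 << TICKET_SHIFT))) {
		smp_mb();
		return 1;
	}
	return 0;
}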
@@ -119,29 +108,19 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- unsigned long tmp;
- u32 slock;
-
smp_mb();
-
- __asm__ __volatile__(
-" mov %1, #1\n"
-"1: ldrex %0, [%2]\n"
-" uadd16 %0, %0, %1\n"
-" strex %1, %0, [%2]\n"
-" teq %1, #0\n"
-" bne 1b"
- : "=&r" (slock), "=&r" (tmp)
- : "r" (&lock->slock)
- : "cc");
-
+ lock->tickets.owner++;
dsb_sev();
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.owner == lock.tickets.next;
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
- return tickets.owner != tickets.next;
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}
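The plain lock->tickets.owner++ in arch_spin_unlock() and the new arch_spin_value_unlocked() helper lean on the ticket layout of arch_spinlock_t: only the lock holder ever writes the owner half, so no exclusive load/store sequence is needed there, just the barriers around the increment. Assuming the usual ARM definition in asm/spinlock_types.h (little-endian half ordering shown), the layout is roughly:

/* Sketch of the assumed ticket-lock layout: the 32-bit slock word overlays
 * two 16-bit tickets.  The lock is free when owner == next; lock() claims a
 * ticket by adding 1 << TICKET_SHIFT to next, unlock() bumps owner. */
typedef struct {
	union {
		u32 slock;
		struct __raw_tickets {
			u16 owner;	/* ticket currently being served */
			u16 next;	/* next ticket to hand out */
		} tickets;
	};
} arch_spinlock_t;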
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -163,6 +142,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
@@ -179,17 +159,21 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
-
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
-
- if (tmp == 0) {
+ unsigned long contended, res;
+
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
+
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -211,7 +195,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
}
/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->lock == 0)
+#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
/*
* Read locks are a bit more hairy:
@@ -229,6 +213,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
@@ -249,6 +234,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
smp_mb();
+ prefetchw(&rw->lock);
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -265,22 +251,31 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
-
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
-
- smp_mb();
- return tmp2 == 0;
+ unsigned long contended, res;
+
+ prefetchw(&rw->lock);
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
+
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
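Both rwlock trylock paths assume the same word encoding: 0 when free, bit 31 (0x80000000) set while a writer holds the lock, and a reader count in the low bits otherwise. That is why strexpl above only stores when the incremented value stays non-negative, and why the "contended < 0x80000000" test (and arch_read_can_lock()/arch_write_can_lock() below) treat anything with bit 31 set as unavailable. A rough C-level equivalent, with read_trylock_sketch as an illustrative name and a GCC __sync CAS standing in for ldrex/strexpl:

static inline int read_trylock_sketch(arch_rwlock_t *rw)
{
	u32 old = ACCESS_ONCE(rw->lock);
	u32 new = old + 1;

	/* a "negative" result means a writer holds the lock (or the
	 * reader count has overflowed into bit 31) */
	if (new >= 0x80000000)
		return 0;

	/* bump the reader count in the low bits */
	if (__sync_bool_compare_and_swap(&rw->lock, old, new)) {
		smp_mb();
		return 1;
	}
	return 0;
}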
/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x) ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)