Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--  arch/s390/lib/spinlock.c | 198
1 file changed, 150 insertions(+), 48 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 2dc14e9c832..5b0e445bc3f 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -1,9 +1,7 @@
/*
- * arch/s390/lib/spinlock.c
* Out of line spinlock code.
*
- * S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004, 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@@ -11,9 +9,9 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
+#include <linux/smp.h>
#include <asm/io.h>
-atomic_t spin_retry_counter;
int spin_retry = 1000;
/**
@@ -26,106 +24,210 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
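For context, the setup handler this hunk tails off is untouched by the
patch. A sketch of its likely body (reconstructed, not quoted from the
diff): it parses the numeric argument of the "spin_retry=" kernel
command line option, so booting with, say, spin_retry=2000 raises the
busy-wait budget before a waiter yields.

static int __init spin_retry_setup(char *str)
{
	/* Parse the "spin_retry=" argument (assumed body). */
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}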
-static inline void
-_diag44(void)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
{
-#ifdef __s390x__
- if (MACHINE_HAS_DIAG44)
-#endif
- asm volatile("diag 0,0,0x44");
+ unsigned int cpu = SPINLOCK_LOCKVAL;
+ unsigned int owner;
+ int count;
+
+ while (1) {
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return;
+ continue;
+ }
+ /* Check if the lock owner is running. */
+ if (!smp_vcpu_scheduled(~owner)) {
+ smp_yield_cpu(~owner);
+ continue;
+ }
+ /* Loop for a while on the lock value. */
+ count = spin_retry;
+ do {
+ owner = ACCESS_ONCE(lp->lock);
+ } while (owner && count-- > 0);
+ if (!owner)
+ continue;
+ /*
+ * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+ * yield the CPU if the lock is still unavailable.
+ */
+ if (!MACHINE_IS_LPAR)
+ smp_yield_cpu(~owner);
+ }
}
+EXPORT_SYMBOL(arch_spin_lock_wait);
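Note the calling convention change visible throughout this patch: the
old code tested _raw_compare_and_swap(...) == 0 against the previous
lock value, while the new code treats the return as a success flag. A
minimal model of the new contract (illustrative only; the real s390
implementation is a single CS instruction, approximated here with a
GCC atomic builtin):

static inline int demo_compare_and_swap(unsigned int *lock,
					unsigned int old, unsigned int new)
{
	/* Nonzero iff *lock still held 'old' and was replaced by 'new'. */
	return __atomic_compare_exchange_n(lock, &old, new, 0,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

The lock word itself now stores the owner's lockval rather than a
caller PC; the ~owner in the smp_vcpu_scheduled() and smp_yield_cpu()
calls above implies that value is the bitwise complement of the owning
CPU number, which is what lets a spinner yield directly to the
holder's virtual CPU.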
-void
-_raw_spin_lock_wait(raw_spinlock_t *lp, unsigned int pc)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
- int count = spin_retry;
+ unsigned int cpu = SPINLOCK_LOCKVAL;
+ unsigned int owner;
+ int count;
+ local_irq_restore(flags);
while (1) {
- if (count-- <= 0) {
- _diag44();
- count = spin_retry;
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ local_irq_disable();
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return;
+ local_irq_restore(flags);
}
- atomic_inc(&spin_retry_counter);
- if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
- return;
+ /* Check if the lock owner is running. */
+ if (!smp_vcpu_scheduled(~owner)) {
+ smp_yield_cpu(~owner);
+ continue;
+ }
+ /* Loop for a while on the lock value. */
+ count = spin_retry;
+ do {
+ owner = ACCESS_ONCE(lp->lock);
+ } while (owner && count-- > 0);
+ if (!owner)
+ continue;
+ /*
+ * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+ * yield the CPU if the lock is still unavailable.
+ */
+ if (!MACHINE_IS_LPAR)
+ smp_yield_cpu(~owner);
}
}
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
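The _flags variant spins with interrupts restored to the caller's
saved state and only disables them around the actual acquisition
attempt, so a contended lock does not extend the irq-off window.
Schematically (lock_is_free() and cas_acquire() are hypothetical
stand-ins for the ACCESS_ONCE read and _raw_compare_and_swap above):

static void demo_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	local_irq_restore(flags);		/* spin in caller's irq state */
	for (;;) {
		if (!lock_is_free(lp))
			continue;
		local_irq_disable();		/* lock must be taken irqs-off */
		if (cas_acquire(lp))
			return;			/* success: irqs stay disabled */
		local_irq_restore(flags);	/* lost the race: reopen, retry */
	}
}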
-int
-_raw_spin_trylock_retry(raw_spinlock_t *lp, unsigned int pc)
+void arch_spin_relax(arch_spinlock_t *lp)
{
- int count = spin_retry;
+ unsigned int cpu = lp->lock;
+ if (cpu != 0) {
+ if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+ !smp_vcpu_scheduled(~cpu))
+ smp_yield_cpu(~cpu);
+ }
+}
+EXPORT_SYMBOL(arch_spin_relax);
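arch_spin_relax() is the hook generic code calls between acquisition
attempts; under z/VM or KVM it converts busy-waiting into a directed
yield toward the lock holder's virtual CPU. A rough caller-side shape
(illustrative, not quoted from the generic spinlock code):

	while (!arch_spin_trylock(&lock))
		arch_spin_relax(&lock);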
- while (count-- > 0) {
- atomic_inc(&spin_retry_counter);
- if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
+{
+ int count;
+
+ for (count = spin_retry; count > 0; count--)
+ if (arch_spin_trylock_once(lp))
return 1;
- }
return 0;
}
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
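arch_spin_trylock_retry() simply re-runs the single-shot inline
trylock up to spin_retry times. A sketch of the expected fast/slow
composition, inferred from the calling conventions here (the inline
helpers are assumed to live in the arch header; demo name
hypothetical):

static inline void demo_spin_lock(arch_spinlock_t *lp)
{
	if (arch_spin_trylock_once(lp))	/* one CAS on the fast path */
		return;
	arch_spin_lock_wait(lp);	/* contended: out-of-line slow path */
}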
-void
-_raw_read_lock_wait(raw_rwlock_t *rw)
+void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _diag44();
+ smp_yield();
count = spin_retry;
}
- atomic_inc(&spin_retry_counter);
- old = rw->lock & 0x7fffffffU;
- if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ old = ACCESS_ONCE(rw->lock);
+ if ((int) old < 0)
+ continue;
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
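The rwlock word encoding these routines depend on: the sign bit
(0x80000000) marks a write holder, and the low 31 bits count readers.
Readers back off whenever the word is negative and otherwise bump the
count; writers need the word fully idle. A hedged sketch of both
transitions, reusing the demo_compare_and_swap() model from above:

static int demo_read_acquire(unsigned int *rwlock)
{
	unsigned int old = *rwlock;

	if ((int) old < 0)	/* writer active: sign bit set */
		return 0;
	return demo_compare_and_swap(rwlock, old, old + 1);
}

static int demo_write_acquire(unsigned int *rwlock)
{
	/* No readers and no writer: the word must be exactly zero. */
	return demo_compare_and_swap(rwlock, 0, 0x80000000);
}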
-int
-_raw_read_trylock_retry(raw_rwlock_t *rw)
+void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
+{
+ unsigned int old;
+ int count = spin_retry;
+
+ local_irq_restore(flags);
+ while (1) {
+ if (count-- <= 0) {
+ smp_yield();
+ count = spin_retry;
+ }
+ old = ACCESS_ONCE(rw->lock);
+ if ((int) old < 0)
+ continue;
+ local_irq_disable();
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+ return;
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(_raw_read_lock_wait_flags);
+
+int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
unsigned int old;
int count = spin_retry;
while (count-- > 0) {
- atomic_inc(&spin_retry_counter);
- old = rw->lock & 0x7fffffffU;
- if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ old = ACCESS_ONCE(rw->lock);
+ if ((int) old < 0)
+ continue;
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return 1;
}
return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
-void
-_raw_write_lock_wait(raw_rwlock_t *rw)
+void _raw_write_lock_wait(arch_rwlock_t *rw)
{
+ unsigned int old;
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _diag44();
+ smp_yield();
count = spin_retry;
}
- atomic_inc(&spin_retry_counter);
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ old = ACCESS_ONCE(rw->lock);
+ if (old)
+ continue;
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return;
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
-int
-_raw_write_trylock_retry(raw_rwlock_t *rw)
+void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
+ unsigned int old;
+ int count = spin_retry;
+
+ local_irq_restore(flags);
+ while (1) {
+ if (count-- <= 0) {
+ smp_yield();
+ count = spin_retry;
+ }
+ old = ACCESS_ONCE(rw->lock);
+ if (old)
+ continue;
+ local_irq_disable();
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
+ return;
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(_raw_write_lock_wait_flags);
+
+int _raw_write_trylock_retry(arch_rwlock_t *rw)
+{
+ unsigned int old;
int count = spin_retry;
while (count-- > 0) {
- atomic_inc(&spin_retry_counter);
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ old = ACCESS_ONCE(rw->lock);
+ if (old)
+ continue;
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return 1;
}
return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);