Diffstat (limited to 'arch/powerpc/lib/locks.c')
-rw-r--r--  arch/powerpc/lib/locks.c  38
1 file changed, 12 insertions, 26 deletions
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 79d0fa3a470..0c9c8d7d073 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -14,18 +14,16 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/smp.h>
/* waiting for a spinlock... */
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
#include <asm/smp.h>
-#include <asm/firmware.h>
-void __spin_yield(raw_spinlock_t *lock)
+void __spin_yield(arch_spinlock_t *lock)
{
unsigned int lock_value, holder_cpu, yield_count;
@@ -34,20 +32,14 @@ void __spin_yield(raw_spinlock_t *lock)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = lppaca[holder_cpu].yield_count;
+ yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (lock->slock != lock_value)
return; /* something has changed */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
- ((u64)holder_cpu << 32) | yield_count);
-#ifdef CONFIG_PPC_SPLPAR
- else
- plpar_hcall_norets(H_CONFER,
- get_hard_smp_processor_id(holder_cpu), yield_count);
-#endif
+ plpar_hcall_norets(H_CONFER,
+ get_hard_smp_processor_id(holder_cpu), yield_count);
}
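
For context, the yield helper above is only reached from the lock slowpaths. Below is a minimal sketch of that call site, modelled on the arch_spin_lock() loop in arch/powerpc/include/asm/spinlock.h of this era; __arch_spin_trylock() and SHARED_PROCESSOR are used as in that header, but this is an illustration, not the verbatim kernel code:

	static inline void arch_spin_lock(arch_spinlock_t *lock)
	{
		while (1) {
			/* fast path: try to take the lock outright */
			if (likely(__arch_spin_trylock(lock) == 0))
				break;
			do {
				HMT_low();		/* drop SMT thread priority while spinning */
				if (SHARED_PROCESSOR)	/* vcpus share a physical cpu: confer to the holder */
					__spin_yield(lock);
			} while (unlikely(lock->slock != 0));
			HMT_medium();			/* restore priority before retrying */
		}
	}
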
/*
@@ -55,7 +47,7 @@ void __spin_yield(raw_spinlock_t *lock)
* This turns out to be the same for read and write locks, since
* we only know the holder if it is write-locked.
*/
-void __rw_yield(raw_rwlock_t *rw)
+void __rw_yield(arch_rwlock_t *rw)
{
int lock_value;
unsigned int holder_cpu, yield_count;
@@ -65,24 +57,18 @@ void __rw_yield(raw_rwlock_t *rw)
return; /* no write lock at present */
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = lppaca[holder_cpu].yield_count;
+ yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (rw->lock != lock_value)
return; /* something has changed */
- if (firmware_has_feature(FW_FEATURE_ISERIES))
- HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
- ((u64)holder_cpu << 32) | yield_count);
-#ifdef CONFIG_PPC_SPLPAR
- else
- plpar_hcall_norets(H_CONFER,
- get_hard_smp_processor_id(holder_cpu), yield_count);
-#endif
+ plpar_hcall_norets(H_CONFER,
+ get_hard_smp_processor_id(holder_cpu), yield_count);
}
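
Both helpers gate the H_CONFER hypercall on the parity of the holder's yield_count: the hypervisor increments the counter in the holder's lppaca around each preemption, so an even value means the vcpu is currently dispatched and conferring would be pointless. A minimal sketch of that test, assuming (as the patch now does) that the field is stored big-endian:

	/* Sketch only: non-zero when the holder vcpu appears preempted and an
	 * H_CONFER is worthwhile.  holder_is_preempted() is a made-up helper
	 * name; lppaca_of() and be32_to_cpu() are used as in the patch.
	 */
	static inline int holder_is_preempted(unsigned int holder_cpu)
	{
		u32 yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);

		return (yield_count & 1) != 0;	/* odd => preempted */
	}
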
#endif
-void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (lock->slock) {
HMT_low();
@@ -92,4 +78,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock)
HMT_medium();
}
-EXPORT_SYMBOL(__raw_spin_unlock_wait);
+EXPORT_SYMBOL(arch_spin_unlock_wait);
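
A hedged usage note on the renamed export: arch_spin_unlock_wait() backs the generic spin_unlock_wait() of this era, which lets a caller wait for any in-flight critical section to drain without acquiring the lock itself. A hypothetical caller, with the structure and names below invented purely for illustration:

	struct my_dev {
		spinlock_t lock;
		/* ... */
	};

	/* Hypothetical teardown path: once 'dev' is unreachable by new
	 * users, wait out any holder that raced with its removal before
	 * freeing it.
	 */
	static void my_dev_quiesce(struct my_dev *dev)
	{
		spin_unlock_wait(&dev->lock);
	}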