author    Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 09:02:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 09:02:01 -0800
commit    8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree      b907c35c79caadafff6ad46a91614e30afd2f967 /arch/mips/include
parent    050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent    b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
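For readers skimming the diff below: the series splits the old raw_ namespace in two. What follows is an editorial sketch of the resulting layering, not kernel source; the struct members and the wrapper body are illustrative only.

	/*
	 * Sketch: the arch layer (this diff) provides arch_spinlock_t and
	 * arch_spin_*(); the core's new raw_spinlock_t wraps it and is the
	 * variant that stays a busy-waiting lock even on preempt-rt.
	 */
	typedef struct {
		unsigned int lock;		/* layout is per-architecture */
	} arch_spinlock_t;

	void arch_spin_lock(arch_spinlock_t *lock);	/* was __raw_spin_lock() */

	typedef struct {
		arch_spinlock_t raw_lock;	/* plus debug/lockdep fields */
	} raw_spinlock_t;

	static inline void raw_spin_lock(raw_spinlock_t *lock)
	{
		/* The real kernel also handles preemption and lockdep here. */
		arch_spin_lock(&lock->raw_lock);
	}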
Diffstat (limited to 'arch/mips/include')
-rw-r--r--  arch/mips/include/asm/spinlock.h        78
-rw-r--r--  arch/mips/include/asm/spinlock_types.h   8
2 files changed, 43 insertions, 43 deletions
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 5b60a09a0f0..21ef9efbde4 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -34,33 +34,33 @@
* becomes equal to the initial value of the tail.
*/
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return ((counters >> 14) ^ counters) & 0x1fff;
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-#define __raw_spin_unlock_wait(x) \
- while (__raw_spin_is_locked(x)) { cpu_relax(); }
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ while (arch_spin_is_locked(x)) { cpu_relax(); }
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
unsigned int counters = ACCESS_ONCE(lock->lock);
return (((counters >> 14) - counters) & 0x1fff) > 1;
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
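The two helpers above decode a single word holding both ticket counters. A standalone C rendering of the same checks (the 14-bit offset and 13-bit mask are read off the code above; helper names are hypothetical, and this is a sketch, not the header itself):

	#include <stdio.h>

	/* "Now serving" lives in the low bits; the ticket (tail) counter
	 * sits 14 bits up; both are compared through a 13-bit mask. */
	static int ticket_is_locked(unsigned int counters)
	{
		/* Held while the tail ticket differs from the one being served. */
		return ((counters >> 14) ^ counters) & 0x1fff;
	}

	static int ticket_is_contended(unsigned int counters)
	{
		/* More than one CPU queued: tail at least two ahead of head. */
		return (((counters >> 14) - counters) & 0x1fff) > 1;
	}

	int main(void)
	{
		printf("%d %d\n", ticket_is_locked(0), ticket_is_contended(0));
		printf("%d %d\n", ticket_is_locked(1u << 14),
				  ticket_is_contended(2u << 14));
		return 0;
	}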
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
[my_ticket] "=&r" (my_ticket));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_lock \n"
+ " .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
smp_llsc_mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
int tmp;
@@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " # __raw_spin_unlock \n"
+ " # arch_spin_unlock \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addiu %[ticket], %[ticket], 1 \n"
" ori %[ticket], %[ticket], 0x2000 \n"
@@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
[ticket] "=&r" (tmp));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_unlock \n"
+ " .set push # arch_spin_unlock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
}
}
-static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
@@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
[now_serving] "=&r" (tmp3));
} else {
__asm__ __volatile__ (
- " .set push # __raw_spin_trylock \n"
+ " .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
" ll %[ticket], %[ticket_ptr] \n"
@@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(rw) ((rw)->lock >= 0)
+#define arch_read_can_lock(rw) ((rw)->lock >= 0)
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
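These two macros imply the rwlock word encoding used through the rest of the file: zero when free, a positive reader count, and the sign bit (the "lui %1, 0x8000" in arch_write_lock below) for a writer. A hedged sketch of the intended semantics, using a signed word for clarity; names are hypothetical:

	/* State of the rwlock word in this sketch:
	 *   0           - free
	 *   n > 0       - n readers hold the lock
	 *   0x80000000  - a writer holds the lock (sign bit set)
	 */
	static int sketch_read_can_lock(int lock_word)
	{
		return lock_word >= 0;		/* no writer present */
	}

	static int sketch_write_can_lock(int lock_word)
	{
		return lock_word == 0;		/* no readers, no writer */
	}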
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
@@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_lock \n"
+ " .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
@@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
/* Note the use of sub, not subu which will make the kernel die with an
overflow exception if we ever try to unlock an rwlock that is already
unlocked or is being held by a writer. */
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- "1: ll %1, %2 # __raw_read_unlock \n"
+ "1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
@@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_unlock \n"
+ " .set noreorder # arch_read_unlock \n"
"1: ll %1, %2 \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
@@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
}
}
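The LL/SC retry loops in arch_read_lock/arch_read_unlock amount to an atomic read-modify-write. A C11 rendering of the same reader-path logic, as a sketch with hypothetical names; the orderings stand in for the smp_llsc_mb()/smp_mb() barriers visible in the diff:

	#include <stdatomic.h>

	typedef struct { _Atomic int lock; } sketch_rwlock_t;

	static void sketch_read_lock(sketch_rwlock_t *rw)
	{
		int old;
		do {
			old = atomic_load_explicit(&rw->lock, memory_order_relaxed);
			/* Negative word: a writer holds the lock, keep spinning. */
		} while (old < 0 ||
			 !atomic_compare_exchange_weak_explicit(&rw->lock, &old,
					old + 1, memory_order_acquire,
					memory_order_relaxed));
	}

	static void sketch_read_unlock(sketch_rwlock_t *rw)
	{
		/* One reader leaves. The MIPS code uses "sub", not "subu", so
		 * per the comment above a bogus unlock dies with an overflow
		 * exception instead of silently corrupting the count. */
		atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
	}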
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
@@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_lock \n"
+ " .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
@@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
smp_llsc_mb();
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();
__asm__ __volatile__(
- " # __raw_write_unlock \n"
+ " # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_read_trylock \n"
+ " .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
@@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
: "memory");
} else {
__asm__ __volatile__(
- " .set noreorder # __raw_write_trylock \n"
+ " .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
@@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
return ret;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index adeedaa116c..ee197c2f9c9 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -12,14 +12,14 @@ typedef struct {
* bits 15..28: ticket
*/
unsigned int lock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int lock;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#endif
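A minimal usage sketch of the renamed types and initializers; variable names are illustrative, and in-tree code normally reaches these through linux/spinlock.h rather than declaring them directly:

	static arch_spinlock_t demo_arch_lock   = __ARCH_SPIN_LOCK_UNLOCKED;
	static arch_rwlock_t   demo_arch_rwlock = __ARCH_RW_LOCK_UNLOCKED;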