author     Ingo Molnar <mingo@elte.hu>            2005-09-10 00:25:56 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-09-10 10:06:21 -0700
commit     fb1c8f93d869b34cacb8b8932e2b83d96a19d720
tree       a006d078aa02e421a7dc4793c335308204859d36 /include
parent     4327edf6b8a7ac7dce144313947995538842d8fd
[PATCH] spinlock consolidation
This patch (written by me and also containing many suggestions of Arjan
van de Ven) does a major cleanup of the spinlock code. It does the
following things:

 - consolidates and enhances the spinlock/rwlock debugging code

 - simplifies the asm/spinlock.h files

 - encapsulates the raw spinlock type and moves generic spinlock
   features (such as ->break_lock) into the generic code

 - cleans up the spinlock code hierarchy to get rid of the spaghetti

Most notably there's now only a single variant of the debugging code,
located in lib/spinlock_debug.c. (Previously we had one SMP debugging
variant per architecture, plus a separate generic one for UP builds.)

Also, I've enhanced the rwlock debugging facility: it will now track
write-owners. There is new spinlock-owner/CPU-tracking on SMP builds
too. All locks have lockup detection now, which works for both soft
and hard spin/rwlock lockups.

The arch-level include files now contain only the minimally necessary
subset of the spinlock code - all the rest that can be generalized now
lives in the generic headers:

   include/asm-i386/spinlock_types.h   | 16
   include/asm-x86_64/spinlock_types.h | 16

I have also split up the various spinlock variants into separate files,
making it easier to see which does what. The new layout is:

   SMP                          |  UP
   -----------------------------|----------------------------------
   asm/spinlock_types.h         |  linux/spinlock_types_up.h
   linux/spinlock_types.h       |  linux/spinlock_types.h
   asm/spinlock.h               |  linux/spinlock_up.h
   linux/spinlock_api_smp.h     |  linux/spinlock_api_up.h
   linux/spinlock.h             |  linux/spinlock.h

/*
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. versions of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

All SMP and UP architectures are converted by this patch.

arm, i386, ia64, ppc, ppc64, s390/s390x and x86-64 were build-tested via
cross-compilers. m32r, mips, sh and sparc have not been tested yet, but
should be mostly fine.

From: Grant Grundler <grundler@parisc-linux.org>

  Booted and lightly tested on a500-44 (64-bit, SMP kernel, dual CPU).
  Builds a 32-bit SMP kernel (not booted or tested). I did not try to
  build non-SMP kernels; that should be trivial to fix up later if
  necessary.

  I converted the bit-ops atomic_hash lock to raw_spinlock_t. Doing so
  avoids some ugly nesting of linux/*.h and asm/*.h files. Those
  particular locks are well tested and contained entirely inside
  arch-specific code. I do NOT expect any new issues to arise with them.

  If someone does ever need to use debug/metrics with them, then they
  will need to unravel this hairball between spinlocks, atomic ops, and
  bit ops that exists only because parisc has exactly one atomic
  instruction: LDCW (load and clear word).

From: "Luck, Tony" <tony.luck@intel.com>

  ia64 fix

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjanv@infradead.org>
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Mikael Pettersson <mikpe@csd.uu.se>
Signed-off-by: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
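To make the encapsulation concrete: after this patch, ordinary kernel code keeps using spinlock_t and spin_lock()/spin_unlock() exactly as before; only the arch headers deal in raw_spinlock_t and the __raw_spin_*() primitives. A minimal sketch of the generic wrapper type, with field names assumed from the description above (the authoritative definition is the new include/linux/spinlock_types.h in the diffstat below):

	typedef struct {
		raw_spinlock_t raw_lock;	/* arch type from asm/spinlock_types.h */
	#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
		unsigned int break_lock;	/* now generic, no longer per-arch */
	#endif
	#ifdef CONFIG_DEBUG_SPINLOCK
		unsigned int magic;
		unsigned int owner_cpu;		/* the new owner/CPU tracking */
		void *owner;
	#endif
	} spinlock_t;

The design point is that debugging state lives once, in the generic wrapper, so lib/spinlock_debug.c can service every architecture instead of each arch carrying its own CONFIG_DEBUG_SPINLOCK variant.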
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/spinlock.h          |  96
-rw-r--r--  include/asm-alpha/spinlock_types.h    |  20
-rw-r--r--  include/asm-arm/spinlock.h            |  50
-rw-r--r--  include/asm-arm/spinlock_types.h      |  20
-rw-r--r--  include/asm-i386/spinlock.h           | 200
-rw-r--r--  include/asm-i386/spinlock_types.h     |  20
-rw-r--r--  include/asm-ia64/spinlock.h           |  69
-rw-r--r--  include/asm-ia64/spinlock_types.h     |  21
-rw-r--r--  include/asm-m32r/spinlock.h           | 127
-rw-r--r--  include/asm-m32r/spinlock_types.h     |  23
-rw-r--r--  include/asm-mips/spinlock.h           |  75
-rw-r--r--  include/asm-mips/spinlock_types.h     |  20
-rw-r--r--  include/asm-parisc/atomic.h           |  12
-rw-r--r--  include/asm-parisc/bitops.h           |   2
-rw-r--r--  include/asm-parisc/cacheflush.h       |   1
-rw-r--r--  include/asm-parisc/processor.h        |   1
-rw-r--r--  include/asm-parisc/spinlock.h         | 163
-rw-r--r--  include/asm-parisc/spinlock_types.h   |  21
-rw-r--r--  include/asm-parisc/system.h           |  24
-rw-r--r--  include/asm-ppc/spinlock.h            |  91
-rw-r--r--  include/asm-ppc/spinlock_types.h      |  20
-rw-r--r--  include/asm-ppc64/spinlock.h          | 191
-rw-r--r--  include/asm-ppc64/spinlock_types.h    |  20
-rw-r--r--  include/asm-s390/spinlock.h           |  63
-rw-r--r--  include/asm-s390/spinlock_types.h     |  21
-rw-r--r--  include/asm-sh/spinlock.h             |  61
-rw-r--r--  include/asm-sh/spinlock_types.h       |  22
-rw-r--r--  include/asm-sparc/spinlock.h          | 140
-rw-r--r--  include/asm-sparc/spinlock_types.h    |  20
-rw-r--r--  include/asm-sparc64/spinlock.h        | 160
-rw-r--r--  include/asm-sparc64/spinlock_types.h  |  20
-rw-r--r--  include/asm-x86_64/spinlock.h         | 164
-rw-r--r--  include/asm-x86_64/spinlock_types.h   |  20
-rw-r--r--  include/linux/bit_spinlock.h          |  77
-rw-r--r--  include/linux/jbd.h                   |   1
-rw-r--r--  include/linux/spinlock.h              | 627
-rw-r--r--  include/linux/spinlock_api_smp.h      |  57
-rw-r--r--  include/linux/spinlock_api_up.h       |  80
-rw-r--r--  include/linux/spinlock_types.h        |  67
-rw-r--r--  include/linux/spinlock_types_up.h     |  51
-rw-r--r--  include/linux/spinlock_up.h           |  74
41 files changed, 1289 insertions, 1723 deletions
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index 80780dba998..8197c69eff4 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
@@ -6,7 +6,6 @@
#include <linux/kernel.h>
#include <asm/current.h>
-
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
@@ -14,43 +13,18 @@
* We make no fairness assumptions. They have a cost.
*/
-typedef struct {
- volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- int on_cpu;
- int line_no;
- void *previous;
- struct task_struct * task;
- const char *base_file;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED (spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
-#else
-#define SPIN_LOCK_UNLOCKED (spinlock_t){ 0 }
-#endif
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_is_locked(x) ((x)->lock != 0)
-#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void _raw_spin_unlock(spinlock_t * lock);
-extern void debug_spin_lock(spinlock_t * lock, const char *, int);
-extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define _raw_spin_lock(LOCK) \
- debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define _raw_spin_trylock(LOCK) \
- debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
-#else
-static inline void _raw_spin_unlock(spinlock_t * lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_unlock_wait(x) \
+ do { cpu_relax(); } while ((x)->lock)
+
+static inline void __raw_spin_unlock(raw_spinlock_t * lock)
{
mb();
lock->lock = 0;
}
-static inline void _raw_spin_lock(spinlock_t * lock)
+static inline void __raw_spin_lock(raw_spinlock_t * lock)
{
long tmp;
@@ -70,80 +44,64 @@ static inline void _raw_spin_lock(spinlock_t * lock)
: "m"(lock->lock) : "memory");
}
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
return !test_and_set_bit(0, &lock->lock);
}
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
/***********************************************************/
-typedef struct {
- volatile unsigned int lock;
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t){ 0 }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-static inline int read_can_lock(rwlock_t *lock)
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}
-static inline int write_can_lock(rwlock_t *lock)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
return lock->lock == 0;
}
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _raw_write_lock(rwlock_t * lock);
-extern void _raw_read_lock(rwlock_t * lock);
-#else
-static inline void _raw_write_lock(rwlock_t * lock)
+static inline void __raw_read_lock(raw_rwlock_t *lock)
{
long regx;
__asm__ __volatile__(
"1: ldl_l %1,%0\n"
- " bne %1,6f\n"
- " lda %1,1\n"
+ " blbs %1,6f\n"
+ " subl %1,2,%1\n"
" stl_c %1,%0\n"
" beq %1,6f\n"
" mb\n"
".subsection 2\n"
"6: ldl %1,%0\n"
- " bne %1,6b\n"
+ " blbs %1,6b\n"
" br 1b\n"
".previous"
: "=m" (*lock), "=&r" (regx)
: "m" (*lock) : "memory");
}
-static inline void _raw_read_lock(rwlock_t * lock)
+static inline void __raw_write_lock(raw_rwlock_t *lock)
{
long regx;
__asm__ __volatile__(
"1: ldl_l %1,%0\n"
- " blbs %1,6f\n"
- " subl %1,2,%1\n"
+ " bne %1,6f\n"
+ " lda %1,1\n"
" stl_c %1,%0\n"
" beq %1,6f\n"
" mb\n"
".subsection 2\n"
"6: ldl %1,%0\n"
- " blbs %1,6b\n"
+ " bne %1,6b\n"
" br 1b\n"
".previous"
: "=m" (*lock), "=&r" (regx)
: "m" (*lock) : "memory");
}
-#endif /* CONFIG_DEBUG_RWLOCK */
-static inline int _raw_read_trylock(rwlock_t * lock)
+static inline int __raw_read_trylock(raw_rwlock_t * lock)
{
long regx;
int success;
@@ -165,7 +123,7 @@ static inline int _raw_read_trylock(rwlock_t * lock)
return success;
}
-static inline int _raw_write_trylock(rwlock_t * lock)
+static inline int __raw_write_trylock(raw_rwlock_t * lock)
{
long regx;
int success;
@@ -187,13 +145,7 @@ static inline int _raw_write_trylock(rwlock_t * lock)
return success;
}
-static inline void _raw_write_unlock(rwlock_t * lock)
-{
- mb();
- lock->lock = 0;
-}
-
-static inline void _raw_read_unlock(rwlock_t * lock)
+static inline void __raw_read_unlock(raw_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
@@ -209,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock)
: "m" (*lock) : "memory");
}
+static inline void __raw_write_unlock(raw_rwlock_t * lock)
+{
+ mb();
+ lock->lock = 0;
+}
+
#endif /* _ALPHA_SPINLOCK_H */
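The alpha rwlock above packs everything into one word: a writer stores 1, so the low bit means "write-locked" (the "blbs" branches test exactly that bit), while each reader subtracts 2, keeping the low bit clear. This is why __raw_read_can_lock() checks (lock & 1) == 0 (no writer) and __raw_write_can_lock() checks lock == 0 (no holders at all). The following user-space model shows the same logic using C11 atomics in place of the ldl_l/stl_c sequences; it is an illustration only, and all model_* names are invented:

	#include <stdatomic.h>

	typedef struct { atomic_int lock; } model_rwlock_t;	/* starts at 0 */

	static void model_read_lock(model_rwlock_t *rw)
	{
		for (;;) {
			int old = atomic_load(&rw->lock);
			if (old & 1)		/* low bit set: writer holds it ("blbs") */
				continue;
			if (atomic_compare_exchange_weak(&rw->lock, &old, old - 2))
				return;		/* one more reader: counter -= 2 */
		}
	}

	static void model_write_lock(model_rwlock_t *rw)
	{
		for (;;) {
			int old = atomic_load(&rw->lock);
			if (old != 0)		/* readers or a writer present ("bne") */
				continue;
			if (atomic_compare_exchange_weak(&rw->lock, &old, 1))
				return;		/* exclusive: low bit now set */
		}
	}

	static void model_read_unlock(model_rwlock_t *rw)
	{
		atomic_fetch_add(&rw->lock, 2);	/* drop one reader */
	}

	static void model_write_unlock(model_rwlock_t *rw)
	{
		atomic_store(&rw->lock, 0);	/* held exclusively: plain store */
	}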
diff --git a/include/asm-alpha/spinlock_types.h b/include/asm-alpha/spinlock_types.h
new file mode 100644
index 00000000000..8141eb5ebf0
--- /dev/null
+++ b/include/asm-alpha/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ALPHA_SPINLOCK_TYPES_H
+#define _ALPHA_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { 0 }
+
+#endif
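Per the layering in the commit message, these per-arch __RAW_*_UNLOCKED initializers are consumed by the generic linux/spinlock_types.h to build the old familiar names, presumably along these lines (a sketch of the composition, not the literal generic header, which also folds in the debug fields):

	#define SPIN_LOCK_UNLOCKED \
		(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, }

	#define RW_LOCK_UNLOCKED \
		(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, }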
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 1f906d09b68..cb4906b4555 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -16,21 +16,14 @@
* Unlocked value: 0
* Locked value: 1
*/
-typedef struct {
- volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#define __raw_spin_is_locked(x) ((x)->lock != 0)
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
-#define spin_is_locked(x) ((x)->lock != 0)
-#define spin_unlock_wait(x) do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
unsigned long tmp;
@@ -47,7 +40,7 @@ static inline void _raw_spin_lock(spinlock_t *lock)
smp_mb();
}
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
unsigned long tmp;
@@ -67,7 +60,7 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
}
}
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
smp_mb();
@@ -80,23 +73,14 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
/*
* RWLOCKS
- */
-typedef struct {
- volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)
-
-/*
+ *
+ *
* Write locks are easy - we just set bit 31. When unlocking, we can
* just write zero since the lock is exclusively held.
*/
-static inline void _raw_write_lock(rwlock_t *rw)
+#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)

{
unsigned long tmp;
@@ -113,7 +97,7 @@ static inline void _raw_write_lock(rwlock_t *rw)
smp_mb();
}
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
unsigned long tmp;
@@ -133,7 +117,7 @@ static inline int _raw_write_trylock(rwlock_t *rw)
}
}
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
smp_mb();
@@ -156,7 +140,7 @@ static inline void _raw_write_unlock(rwlock_t *rw)
* currently active. However, we know we won't have any write
* locks.
*/
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -173,7 +157,7 @@ static inline void _raw_read_lock(rwlock_t *rw)
smp_mb();
}
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long tmp, tmp2;
@@ -190,6 +174,6 @@ static inline void _raw_read_unlock(rwlock_t *rw)
: "cc");
}
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
#endif /* __ASM_SPINLOCK_H */
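ARM supplies no read-trylock of its own, so the last line above maps __raw_read_trylock() to the generic helper. A sketch of what that fallback plausibly looks like in the new linux/spinlock.h (hedged: the exact body is in that header, not shown in this diff; the assumption is that it simply acquires the lock unconditionally, which is safe because an ARM read lock can only ever spin on a writer):

	static inline int generic__raw_read_trylock(raw_rwlock_t *lock)
	{
		/* Always "succeeds" by just taking the lock - a generic
		 * placeholder until the arch grows a real read_trylock. */
		__raw_read_lock(lock);
		return 1;
	}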
diff --git a/include/asm-arm/spinlock_types.h b/include/asm-arm/spinlock_types.h
new file mode 100644
index 00000000000..43e83f6d2ee
--- /dev/null
+++ b/include/asm-arm/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index f9ff31f4003..23604350cdf 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,46 +7,21 @@
#include <linux/config.h>
#include <linux/compiler.h>
-asmlinkage int printk(const char * fmt, ...)
- __attribute__ ((format (printf, 1, 2)));
-
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
- */
-
-typedef struct {
- volatile unsigned int slock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPINLOCK_MAGIC_INIT , SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT /* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
-
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-/*
+ *
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
*/
-#define spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
+#define __raw_spin_is_locked(x) \
+ (*(volatile signed char *)(&(x)->slock) <= 0)
-#define spin_lock_string \
+#define __raw_spin_lock_string \
"\n1:\t" \
"lock ; decb %0\n\t" \
"jns 3f\n" \
@@ -57,7 +32,7 @@ typedef struct {
"jmp 1b\n" \
"3:\n\t"
-#define spin_lock_string_flags \
+#define __raw_spin_lock_string_flags \
"\n1:\t" \
"lock ; decb %0\n\t" \
"jns 4f\n\t" \
@@ -73,86 +48,71 @@ typedef struct {
"jmp 1b\n" \
"4:\n\t"
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ __asm__ __volatile__(
+ __raw_spin_lock_string
+ :"=m" (lock->slock) : : "memory");
+}
+
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__(
+ __raw_spin_lock_string_flags
+ :"=m" (lock->slock) : "r" (flags) : "memory");
+}
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ char oldval;
+ __asm__ __volatile__(
+ "xchgb %b0,%1"
+ :"=q" (oldval), "=m" (lock->slock)
+ :"0" (0) : "memory");
+ return oldval > 0;
+}
+
/*
- * This works. Despite all the confusion.
- * (except on PPro SMP or if we are using OOSTORE)
+ * __raw_spin_unlock based on writing $1 to the low byte.
+ * This method works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
* (PPro errata 66, 92)
*/
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
"movb $1,%0" \
:"=m" (lock->slock) : : "memory"
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(lock->magic != SPINLOCK_MAGIC);
- BUG_ON(!spin_is_locked(lock));
-#endif
__asm__ __volatile__(
- spin_unlock_string
+ __raw_spin_unlock_string
);
}
#else
-#define spin_unlock_string \
+#define __raw_spin_unlock_string \
"xchgb %b0, %1" \
:"=q" (oldval), "=m" (lock->slock) \
:"0" (oldval) : "memory"
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
char oldval = 1;
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(lock->magic != SPINLOCK_MAGIC);
- BUG_ON(!spin_is_locked(lock));
-#endif
- __asm__ __volatile__(
- spin_unlock_string
- );
-}
-#endif
-
-static inline int _raw_spin_trylock(spinlock_t *lock)
-{
- char oldval;
__asm__ __volatile__(
- "xchgb %b0,%1"
- :"=q" (oldval), "=m" (lock->slock)
- :"0" (0) : "memory");
- return oldval > 0;
+ __raw_spin_unlock_string
+ );
}
-static inline void _raw_spin_lock(spinlock_t *lock)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
- printk("eip: %p\n", __builtin_return_address(0));
- BUG();
- }
#endif
- __asm__ __volatile__(
- spin_lock_string
- :"=m" (lock->slock) : : "memory");
-}
-static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
-{
-#ifdef CONFIG_DEBUG_SPINLOCK
- if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
- printk("eip: %p\n", __builtin_return_address(0));
- BUG();
- }
-#endif
- __asm__ __volatile__(
- spin_lock_string_flags
- :"=m" (lock->slock) : "r" (flags) : "memory");
-}
+#define __raw_spin_unlock_wait(lock) \
+ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
/*
* Read-write spinlocks, allowing multiple readers
@@ -163,72 +123,41 @@ static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores. See
+ * semaphore.h for details. -ben
+ *
+ * the helpers are in arch/i386/kernel/semaphore.c
*/
-typedef struct {
- volatile unsigned int lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned magic;
-#endif
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define RWLOCK_MAGIC_INIT , RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT /* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
/**
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define read_can_lock(x) ((int)(x)->lock > 0)
+#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
-/*
- * On x86, we implement read-write locks as a 32-bit counter
- * with the high bit (sign) being the "contended" bit.
- *
- * The inline assembly is non-obvious. Think about it.
- *
- * Changed to use the same technique as rw semaphores. See
- * semaphore.h for details. -ben
- */
-/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
-
-static inline void _raw_read_lock(rwlock_t *rw)
+static inline void __raw_read_lock(raw_rwlock_t *rw)
{
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
__build_read_lock(rw, "__read_lock_failed");
}
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
{
-#ifdef CONFIG_DEBUG_SPINLOCK
- BUG_ON(rw->magic != RWLOCK_MAGIC);
-#endif
__build_write_lock(rw, "__write_lock_failed");
}
-#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
-
-static inline int _raw_read_trylock(rwlock_t *lock)
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
atomic_dec(count);
@@ -238,7 +167,7 @@ static inline int _raw_read_trylock(rwlock_t *lock)
return 0;
}
-static inline int _raw_write_trylock(rwlock_t *lock)
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
if (atomic_sub_and_test(RW_LOCK_BIAS, count))
@@ -247,4 +176,15 @@ static inline int _raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+ asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+ asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ", %0"
+ : "=m" (rw->lock) : : "memory");
+}
+
#endif /* __ASM_SPINLOCK_H */
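The trylock bodies above show the whole RW_LOCK_BIAS trick: the counter starts at the bias (0x01000000 on i386), a reader subtracts 1 and owns the lock if the result is still non-negative, and a writer subtracts the entire bias and owns the lock only if the result is exactly zero (no readers, no other writer); on failure each undoes its subtraction. A stand-alone model of the same arithmetic in C11 atomics (model_* names invented for illustration; the kernel uses atomic_dec()/atomic_sub_and_test(), which these fetch-ops mirror):

	#include <stdatomic.h>

	#define MODEL_RW_LOCK_BIAS 0x01000000

	typedef struct { atomic_int count; } model_rwlock_t;	/* starts at the bias */

	static int model_read_trylock(model_rwlock_t *rw)
	{
		/* Take a reader slot; a negative result means a writer
		 * holds (or is bidding for) the lock, so undo and fail. */
		if (atomic_fetch_sub(&rw->count, 1) - 1 >= 0)
			return 1;
		atomic_fetch_add(&rw->count, 1);
		return 0;
	}

	static int model_write_trylock(model_rwlock_t *rw)
	{
		/* Claim the whole bias; landing exactly on zero proves
		 * there were no readers and no other writer. */
		if (atomic_fetch_sub(&rw->count, MODEL_RW_LOCK_BIAS) - MODEL_RW_LOCK_BIAS == 0)
			return 1;
		atomic_fetch_add(&rw->count, MODEL_RW_LOCK_BIAS);
		return 0;
	}

The unlock paths in the diff are then just the inverses: read_unlock adds 1 back ("lock ; incl"), write_unlock adds the bias back ("lock ; addl $RW_LOCK_BIAS").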
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h
new file mode 100644
index 00000000000..59efe849f35
--- /dev/null
+++ b/include/asm-i386/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+
+typedef struct {
+ volatile unsigned int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS }
+
+#endif
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index d2430aa0d49..5b78611411c 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -17,28 +17,20 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
-typedef struct {
- volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
- unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-#define spin_lock_init(x) ((x)->lock = 0)
+#define __raw_spin_lock_init(x) ((x)->lock = 0)
#ifdef ASM_SUPPORTED
/*
* Try to get the lock. If we fail to get the lock, make a non-standard call to
* ia64_spinlock_contention(). We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered".
*/
#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
static inline void
-_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
{
register volatile unsigned int *ptr asm ("r31") = &lock->lock;
@@ -94,17 +86,17 @@ _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
#endif
}
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
/* Unlock by doing an ordered store and releasing the cacheline with nta */
-static inline void _raw_spin_unlock(spinlock_t *x) {
+static inline void __raw_spin_unlock(raw_spinlock_t *x) {
barrier();
asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
}
#else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _ra