Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--  include/linux/spinlock.h  51
1 file changed, 35 insertions(+), 16 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 86088213334..3f2867ff0ce 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -50,17 +50,18 @@
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
+#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
-#include <asm/system.h>
+#include <asm/barrier.h>
/*
* Must define these before including other files, inline functions need them
*/
-#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
@@ -80,7 +81,7 @@
#include <linux/spinlock_types.h>
/*
- * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -116,9 +117,27 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name, it doesn't necessarily have to be a full barrier.
+ * It only has to guarantee that a STORE issued before the critical
+ * section cannot be reordered with a LOAD inside this section.
+ * spin_lock() is a one-way barrier, so that LOAD cannot escape out
+ * of the region.  The default implementation therefore only ensures
+ * that a STORE cannot move into the critical section; smp_wmb()
+ * serializes it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
+#endif
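
For reference, a deliberately minimal sketch of the pattern the comment above describes (event_lock, event_ready and hypothetical_wake are illustrative names only, not part of this patch): a waker stores a flag before taking a lock, and the critical section then loads the sleeper's state; smp_mb__before_spinlock() keeps that STORE from being reordered with the LOAD inside the section.

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(event_lock);
	static bool event_ready;

	/* Waker side of a hypothetical wait/wake pair. */
	static void hypothetical_wake(struct task_struct *p)
	{
		event_ready = true;		/* STORE before the critical section */
		smp_mb__before_spinlock();	/* order it against the LOAD below   */
		spin_lock(&event_lock);
		if (p->state != TASK_RUNNING)	/* LOAD inside the critical section  */
			wake_up_process(p);
		spin_unlock(&event_lock);
	}
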
+
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock() do { } while (0)
#endif
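
Similarly, a rough sketch of what the smp_mb__after_unlock_lock() guarantee buys (node_lock, shared_a, shared_b and hypothetical_handoff are made-up names, not part of this patch): because both critical sections take the same lock and the helper follows the second acquisition, the store before the first unlock and the load after the second lock are ordered as if a full memory barrier sat between them.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(node_lock);
	static int shared_a, shared_b;

	static int hypothetical_handoff(void)
	{
		int val;

		spin_lock(&node_lock);
		shared_a = 1;			/* access before the UNLOCK */
		spin_unlock(&node_lock);

		spin_lock(&node_lock);
		smp_mb__after_unlock_lock();	/* UNLOCK+LOCK now acts as a full barrier */
		val = shared_b;			/* access after the LOCK */
		spin_unlock(&node_lock);

		return val;
	}
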
/**
@@ -128,19 +147,21 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_spin_lock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
- extern void do_raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
-static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
+ __acquire(lock);
arch_spin_lock(&lock->raw_lock);
}
static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
+ __acquire(lock);
arch_spin_lock_flags(&lock->raw_lock, *flags);
}
@@ -149,9 +170,10 @@ static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
return arch_spin_trylock(&(lock)->raw_lock);
}
-static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
arch_spin_unlock(&lock->raw_lock);
+ __release(lock);
}
#endif
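
The __acquires()/__releases() markers added above are sparse context annotations, and the bare __acquire()/__release() statements in the inline !CONFIG_DEBUG_SPINLOCK versions keep sparse's lock-context counting balanced. As a rough illustration of how callers typically use the same annotations (obj_lock, obj_claim and obj_release are hypothetical names, not part of this patch):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(obj_lock);

	/*
	 * Returns with obj_lock held; the annotation tells sparse that
	 * the imbalance is intentional.
	 */
	static void obj_claim(void) __acquires(obj_lock)
	{
		spin_lock(&obj_lock);
	}

	/* Drops the lock taken by obj_claim(). */
	static void obj_release(void) __releases(obj_lock)
	{
		spin_unlock(&obj_lock);
	}
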
@@ -371,16 +393,13 @@ static inline int spin_can_lock(spinlock_t *lock)
return raw_spin_can_lock(&lock->rlock);
}
-static inline void assert_spin_locked(spinlock_t *lock)
-{
- assert_raw_spin_locked(&lock->rlock);
-}
+#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
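
A minimal, hypothetical use of the assert_spin_locked() macro (counter_add_locked and its arguments are illustrative names, not from this patch): a helper that requires its caller to already hold a lock can state that precondition up front.

	#include <linux/spinlock.h>

	/* The caller must already hold *lock. */
	static void counter_add_locked(spinlock_t *lock, unsigned long *counter,
				       unsigned long delta)
	{
		assert_spin_locked(lock);	/* trip a BUG if the lock is not held */
		*counter += delta;
	}
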
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/**
* atomic_dec_and_lock - lock on reaching reference count zero
* @atomic: the atomic counter