Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--  include/linux/spinlock.h | 35 +++++++++++++++++++++++----------
1 file changed, 25 insertions(+), 10 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 80e535897de..3f2867ff0ce 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -55,8 +55,8 @@
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
+#include <asm/barrier.h>
-#include <asm/system.h>
/*
* Must define these before including other files, inline functions need them
@@ -81,7 +81,7 @@
#include <linux/spinlock_types.h>
/*
- * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -117,9 +117,27 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name, this need not be a full barrier.  It only has to
+ * guarantee that a STORE preceding the critical section cannot be
+ * reordered with a LOAD inside that section.  spin_lock() is a
+ * one-way barrier, so such a LOAD cannot escape out of the region;
+ * the default implementation therefore only has to ensure that a
+ * STORE cannot move down into the critical section, and smp_wmb()
+ * serializes it with the STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
+#endif
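
As an illustration of the pattern this default is meant to support, here is a
minimal hedged sketch; the flag, cond and wait_lock names are hypothetical and
not part of this patch:

static int flag, cond;			/* hypothetical shared state */
static DEFINE_SPINLOCK(wait_lock);	/* hypothetical lock */

static void publish_then_check(void)
{
	flag = 1;			/* STORE before the critical section */
	smp_mb__before_spinlock();	/* keep that STORE ahead of the LOADs below */
	spin_lock(&wait_lock);
	if (cond) {			/* LOAD inside the critical section */
		/* ... */
	}
	spin_unlock(&wait_lock);
}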
+
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
+ * if the UNLOCK and the LOCK are executed by the same CPU, or if the
+ * UNLOCK and the LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock() do { } while (0)
#endif
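
A similarly hedged sketch of the guarantee this hook documents, for the
same-CPU case; lock_a and lock_b are hypothetical:

static DEFINE_SPINLOCK(lock_a);		/* hypothetical locks */
static DEFINE_SPINLOCK(lock_b);

static void ordered_handoff(void)
{
	spin_lock(&lock_a);
	/* accesses here ... */
	spin_unlock(&lock_a);

	spin_lock(&lock_b);
	smp_mb__after_unlock_lock();	/* UNLOCK+LOCK now acts as a full barrier */
	/* ... are fully ordered against accesses here */
	spin_unlock(&lock_b);
}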
/**
@@ -375,16 +393,13 @@ static inline int spin_can_lock(spinlock_t *lock)
return raw_spin_can_lock(&lock->rlock);
}
-static inline void assert_spin_locked(spinlock_t *lock)
-{
- assert_raw_spin_locked(&lock->rlock);
-}
+#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
*/
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/**
* atomic_dec_and_lock - lock on reaching reference count zero
* @atomic: the atomic counter
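
For context, a hedged sketch of the usual refcount-release pattern built on
atomic_dec_and_lock(); struct obj, obj_list_lock and the unlink step are
hypothetical, and kfree() assumes <linux/slab.h>:

struct obj {				/* hypothetical refcounted object */
	atomic_t refcount;
	/* ... */
};

static DEFINE_SPINLOCK(obj_list_lock);	/* hypothetical list lock */

static void obj_put(struct obj *o)
{
	/*
	 * Drop the reference; only when it reaches zero is
	 * obj_list_lock taken, so the object can be unlinked
	 * and freed under the lock.
	 */
	if (atomic_dec_and_lock(&o->refcount, &obj_list_lock)) {
		/* unlink o from the list here */
		spin_unlock(&obj_list_lock);
		kfree(o);
	}
}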