path: root/lib/lockref.c
author		Olof Johansson <olof@lixom.net>		2013-10-28 14:39:03 -0700
committer	Olof Johansson <olof@lixom.net>		2013-10-28 14:39:03 -0700
commit		43d93947a54cf9323198a3a37eaf3ec14adb23e1 (patch)
tree		0c290a7fcbc644b94527f399e4f9509a8d379a5d /lib/lockref.c
parent		02673f94d04e629e4cdc41e2bf2dc980743cf3df (diff)
parent		54b89756a14aa1043507ce0811b4b6c02c5dddcc (diff)
Merge tag 'omap-for-v3.13/cm-scm-cleanup-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/cleanup
From Paul Walmsley <paul@pwsan.com> via Tony Lindgren:

Move some of the OMAP2+ CM and System Control Module direct register
accesses into CM- and System Control Module-specific "drivers" underneath
arch/arm/mach-omap2/. This is a prerequisite for moving this code out of
arch/arm/mach-omap2/ into drivers/.

Basic test logs are available here:

http://www.pwsan.com/omap/testlogs/cm_scm_cleanup_a_v3.13/20131019101809/

* tag 'omap-for-v3.13/cm-scm-cleanup-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  ARM: OMAP3: control: add API for setting IVA bootmode
  ARM: OMAP3: CM/control: move CM scratchpad save to CM driver
  ARM: OMAP3: McBSP: do not access CM register directly
  ARM: OMAP3: clock: add API to enable/disable autoidle for a single clock
  ARM: OMAP2: CM/PM: remove direct register accesses outside CM code
  + Linux 3.12-rc4

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'lib/lockref.c')
-rw-r--r--	lib/lockref.c	23
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/lib/lockref.c b/lib/lockref.c
index 677d036cf3c..6f9d434c152 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,6 +4,22 @@
#ifdef CONFIG_CMPXCHG_LOCKREF
/*
+ * Allow weakly-ordered memory architectures to provide barrier-less
+ * cmpxchg semantics for lockref updates.
+ */
+#ifndef cmpxchg64_relaxed
+# define cmpxchg64_relaxed cmpxchg64
+#endif
+
+/*
+ * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
+ * This is useful for architectures with an expensive cpu_relax().
+ */
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
+/*
* Note that the "cmpxchg()" reloads the "old" value for the
* failure case.
*/
@@ -14,12 +30,13 @@
while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
struct lockref new = old, prev = old; \
CODE \
- old.lock_count = cmpxchg64(&lockref->lock_count, \
- old.lock_count, new.lock_count); \
+ old.lock_count = cmpxchg64_relaxed(&lockref->lock_count, \
+ old.lock_count, \
+ new.lock_count); \
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- cpu_relax(); \
+ arch_mutex_cpu_relax(); \
} \
} while (0)
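
For context, CMPXCHG_LOOP() expands its first argument (CODE) against a private
copy of the lockref and its second argument (SUCCESS) once cmpxchg64_relaxed()
confirms that no concurrent update raced with it; if the embedded spinlock is
ever observed locked, the loop falls through so the caller can take the slow
path. A minimal sketch of a caller, modelled on lockref_get() in the same file
and slightly simplified here for illustration:

	/*
	 * Sketch of a CMPXCHG_LOOP() user (modelled on lockref_get() in
	 * lib/lockref.c; simplified for illustration, not a verbatim copy).
	 */
	void lockref_get(struct lockref *lockref)
	{
		/*
		 * Fast path: bump the reference count with the lockless
		 * cmpxchg64_relaxed() retry loop while the embedded
		 * spinlock is observed unlocked.
		 */
		CMPXCHG_LOOP(
			new.count++;
		,
			return;
		);

		/*
		 * Slow path: the spinlock was held, or CONFIG_CMPXCHG_LOCKREF
		 * is not set and CMPXCHG_LOOP() compiles to nothing, so take
		 * the lock and update the count under it.
		 */
		spin_lock(&lockref->lock);
		lockref->count++;
		spin_unlock(&lockref->lock);
	}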