Diffstat (limited to 'lib/dec_and_lock.c')
-rw-r--r--  lib/dec_and_lock.c | 24 +++++++++---------------
1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 6658d81e183..e26278576b3 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -1,12 +1,10 @@
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
  *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
@@ -18,17 +16,14 @@
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
-
-#ifndef ATOMIC_DEC_AND_LOCK
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
@@ -37,4 +32,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock);
 
-#endif
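
For context, the following is a minimal usage sketch of the primitive this file implements. The struct foo object, foo_list, foo_list_lock and foo_put() are hypothetical names invented for illustration; they are not part of this patch or of the kernel tree.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object kept on a global list. */
struct foo {
	atomic_t refcount;
	struct list_head node;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_list_lock);

/*
 * Drop one reference.  Only when the count reaches zero does
 * atomic_dec_and_lock() return 1, and in that case it returns with
 * foo_list_lock held, so the object can be unlinked and freed without
 * a concurrent lookup taking a new reference under the same lock.
 */
static void foo_put(struct foo *f)
{
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		list_del(&f->node);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}

Note how the atomic_add_unless() fast path introduced by this patch benefits such a caller: foo_put() only touches foo_list_lock when the reference count actually drops to zero, and every other decrement completes without taking the lock.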