Diffstat (limited to 'include/asm-generic/mutex-xchg.h')
-rw-r--r--	include/asm-generic/mutex-xchg.h	53
1 file changed, 26 insertions(+), 27 deletions(-)
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 1d24f47e6c4..f169ec06478 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -1,9 +1,9 @@
 /*
- * asm-generic/mutex-xchg.h
+ * include/asm-generic/mutex-xchg.h
  *
  * Generic implementation of the mutex fastpath, based on xchg().
  *
- * NOTE: An xchg based implementation is less optimal than an atomic
+ * NOTE: An xchg based implementation might be less optimal than an atomic
  * decrement/increment based implementation. If your architecture
  * has a reasonable atomic dec/inc then you should probably use
  * asm-generic/mutex-dec.h instead, or you could open-code an
@@ -22,34 +22,34 @@
  * wasn't 1 originally. This function MUST leave the value lower than 1
  * even when the "1" assertion wasn't true.
  */
-#define __mutex_fastpath_lock(count, fail_fn)				\
-do {									\
-	if (unlikely(atomic_xchg(count, 0) != 1))			\
-		fail_fn(count);						\
-	else								\
-		smp_mb();						\
-} while (0)
-
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_xchg(count, 0) != 1))
+		/*
+		 * We failed to acquire the lock, so mark it contended
+		 * to ensure that any waiting tasks are woken up by the
+		 * unlock slow path.
+		 */
+		if (likely(atomic_xchg(count, -1) != 1))
+			fail_fn(count);
+}
 
 /**
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
-		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+		if (likely(atomic_xchg(count, -1) != 1))
+			return -1;
+	return 0;
 }
 
 /**
@@ -64,12 +64,12 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  * to return 0 otherwise.
  */
-#define __mutex_fastpath_unlock(count, fail_fn)				\
-do {									\
-	smp_mb();							\
-	if (unlikely(atomic_xchg(count, 1) != 0))			\
-		fail_fn(count);						\
-} while (0)
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+	if (unlikely(atomic_xchg(count, 1) != 0))
+		fail_fn(count);
+}
 
 #define __mutex_slowpath_needs_to_unlock()	0
 
@@ -109,7 +109,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		if (prev < 0)
 			prev = 0;
 	}
-	smp_mb();
 
 	return prev;
 }
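The interesting change above is the double xchg in __mutex_fastpath_lock(): a count of 1 means unlocked, 0 means locked, and -1 means locked with possible waiters. The old macro left the count at 0 on a failed acquisition even when it had been -1, so a subsequent unlocker could take its fastpath without waking sleepers; the new code re-marks the lock as contended (-1) before calling the slow path, and the second xchg doubles as a last-chance acquisition if the owner released the lock between the two exchanges. The explicit smp_mb() calls can be dropped because atomic_xchg() is a value-returning atomic and already implies a full memory barrier.

A minimal userspace sketch of the patched locking logic, assuming C11 <stdatomic.h> in place of the kernel's atomic_t API; mutex_slowpath() is a hypothetical stub standing in for the real slow path (fail_fn):

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's slow-path fail_fn. */
static void mutex_slowpath(atomic_int *count)
{
	printf("slow path entered, count = %d\n", atomic_load(count));
}

/*
 * Mirrors the patched __mutex_fastpath_lock():
 * 1 = unlocked, 0 = locked, -1 = locked, possibly with waiters.
 */
static void fastpath_lock(atomic_int *count)
{
	if (atomic_exchange(count, 0) != 1)
		/*
		 * Acquisition failed: force the count to -1 so the
		 * eventual unlocker cannot take its fastpath without
		 * waking waiters. If the lock was released between
		 * the two exchanges, the second one returns 1 and we
		 * own the lock after all, so the slow path is skipped.
		 */
		if (atomic_exchange(count, -1) != 1)
			mutex_slowpath(count);
}

int main(void)
{
	atomic_int count = 1;		/* start unlocked */

	fastpath_lock(&count);		/* uncontended: count -> 0 */
	fastpath_lock(&count);		/* contended: count -> -1, slow path */
	printf("final count = %d\n", atomic_load(&count));
	return 0;
}

One design trade-off worth noting: when the second exchange does win the lock, the count is left at -1 rather than 0, so the next unlock takes the slow path even if no one is actually waiting. That spurious slow-path trip is safe, just slightly pessimistic, and is the price of guaranteeing that waiters are never left asleep.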
