Diffstat (limited to 'include/asm-generic/mutex-xchg.h')
-rw-r--r--   include/asm-generic/mutex-xchg.h   34
1 file changed, 16 insertions, 18 deletions
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 32a2100c1ae..f169ec06478 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -1,5 +1,5 @@
 /*
- * asm-generic/mutex-xchg.h
+ * include/asm-generic/mutex-xchg.h
  *
  * Generic implementation of the mutex fastpath, based on xchg().
  *
@@ -23,33 +23,33 @@
  * even when the "1" assertion wasn't true.
  */
 static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
-		fail_fn(count);
-	else
-		smp_mb();
+		/*
+		 * We failed to acquire the lock, so mark it contended
+		 * to ensure that any waiting tasks are woken up by the
+		 * unlock slow path.
+		 */
+		if (likely(atomic_xchg(count, -1) != 1))
+			fail_fn(count);
 }
 
 /**
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  *                                from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
-		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+		if (likely(atomic_xchg(count, -1) != 1))
+			return -1;
+	return 0;
 }
 
 /**
@@ -65,9 +65,8 @@ __mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *
  * to return 0 otherwise.
  */
 static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_xchg(count, 1) != 0))
 		fail_fn(count);
 }
@@ -110,7 +109,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		if (prev < 0)
 			prev = 0;
 	}
-	smp_mb();
 
 	return prev;
 }
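
For readers who want to see the count-state convention used above in isolation, here is a minimal user-space sketch built on C11 atomics and pthreads. It is only an illustrative analogue, not kernel code: all names (toy_mutex, toy_lock, toy_lock_slowpath, toy_unlock, toy_unlock_slowpath, worker) are made up for this example, the spinning slowpath stands in for the kernel's sleeping slowpath that fail_fn would enter, and the sequentially consistent atomic_exchange() plays the role of atomic_xchg(), which already implies a full memory barrier (presumably why the explicit smp_mb() calls can be dropped in the diff). The count encodes 1 = unlocked, 0 = locked with no waiters, and -1 = locked and possibly contended.

/* Illustrative sketch only; names and structure are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct toy_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, -1: locked, maybe contended */
};

static void toy_mutex_init(struct toy_mutex *m)
{
	atomic_init(&m->count, 1);
}

static void toy_lock_slowpath(struct toy_mutex *m)
{
	/*
	 * Keep marking the lock contended (-1) until the old value shows it
	 * was free (1); at that point we own it.  The kernel's slowpath
	 * sleeps on a wait queue here instead of spinning.
	 */
	while (atomic_exchange(&m->count, -1) != 1)
		;
}

static void toy_lock(struct toy_mutex *m)
{
	/* Fastpath: swapping 1 -> 0 means we took an uncontended lock. */
	if (atomic_exchange(&m->count, 0) != 1)
		toy_lock_slowpath(m);
}

static void toy_unlock_slowpath(struct toy_mutex *m)
{
	/*
	 * The kernel's unlock slowpath would wake a sleeping waiter here;
	 * the spinning waiters in this sketch notice the released lock on
	 * their own, so there is nothing to do.
	 */
	(void)m;
}

static void toy_unlock(struct toy_mutex *m)
{
	/* Fastpath: swapping 0 -> 1 releases an uncontended lock. */
	if (atomic_exchange(&m->count, 1) != 0)
		toy_unlock_slowpath(m);
}

static struct toy_mutex lock;
static long shared;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		toy_lock(&lock);
		shared++;
		toy_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	toy_mutex_init(&lock);
	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("shared = %ld (expect 200000)\n", shared);
	return 0;
}

Note how the sketch mirrors the patched fastpath: a failed first exchange falls through to a second exchange that leaves the count at -1, so a later toy_unlock() sees a non-zero old value and knows the slow (wake-up) path is needed.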
