Diffstat (limited to 'include/linux/mutex.h')
-rw-r--r--  include/linux/mutex.h | 109
1 file changed, 86 insertions(+), 23 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f1ac507fa20..42aa9b9ecd5 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,11 +10,14 @@
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H
+#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
-
-#include <asm/atomic.h>
+#include <linux/lockdep.h>
+#include <linux/atomic.h>
+#include <asm/processor.h>
+#include <linux/osq_lock.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -28,7 +31,8 @@
* - task may not exit with mutex held
* - memory areas where held locks reside must not be freed
* - held mutexes must not be reinitialized
- * - mutexes may not be used in irq contexts
+ * - mutexes may not be used in hardware or software interrupt
+ * contexts such as tasklets and timers
*
* These semantics are fully enforced when DEBUG_MUTEXES is
* enabled. Furthermore, besides enforcing the above rules, the mutex
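For context, a minimal usage sketch that respects these rules. The variable and function names are hypothetical and not part of this patch; only DEFINE_MUTEX(), mutex_lock() and mutex_unlock() come from this header:

static DEFINE_MUTEX(example_lock);
static int shared_counter;

/* Must run in process context: mutex_lock() may sleep. */
static void example_update(void)
{
	mutex_lock(&example_lock);
	shared_counter++;		/* protected section */
	mutex_unlock(&example_lock);	/* released by the same task */
}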
@@ -48,13 +52,19 @@ struct mutex {
atomic_t count;
spinlock_t wait_lock;
struct list_head wait_list;
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
+ struct task_struct *owner;
+#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
#ifdef CONFIG_DEBUG_MUTEXES
- struct thread_info *owner;
- struct list_head held_list;
- unsigned long acquire_ip;
const char *name;
void *magic;
#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
/*
@@ -65,7 +75,6 @@ struct mutex_waiter {
struct list_head list;
struct task_struct *task;
#ifdef CONFIG_DEBUG_MUTEXES
- struct mutex *lock;
void *magic;
#endif
};
@@ -74,47 +83,101 @@ struct mutex_waiter {
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
-# define mutex_init(mutex) __mutex_init(mutex, NULL)
-# define mutex_destroy(mutex) do { } while (0)
-# define mutex_debug_show_all_locks() do { } while (0)
-# define mutex_debug_show_held_locks(p) do { } while (0)
-# define mutex_debug_check_no_locks_held(task) do { } while (0)
-# define mutex_debug_check_no_locks_freed(from, len) do { } while (0)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+static inline void mutex_destroy(struct mutex *lock) {}
+#endif
+
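Dynamically allocated mutexes go through mutex_init(); the macro form above gives each initialization site its own static lock_class_key, which is what lockdep uses to group locks into classes. A hedged sketch with a hypothetical structure:

struct example_dev {
	struct mutex lock;
	/* ... other per-device state ... */
};

static void example_dev_setup(struct example_dev *dev)
{
	/* Must happen before the mutex is first locked; an already
	 * locked mutex may not be (re)initialized. */
	mutex_init(&dev->lock);
}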
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
- , .wait_lock = SPIN_LOCK_UNLOCKED \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
- __DEBUG_MUTEX_INITIALIZER(lockname) }
+ __DEBUG_MUTEX_INITIALIZER(lockname) \
+ __DEP_MAP_MUTEX_INITIALIZER(lockname) }
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
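__MUTEX_INITIALIZER() can also initialize a mutex embedded in a statically defined object, where DEFINE_MUTEX() does not apply. A sketch under that assumption, with hypothetical names:

static struct {
	struct mutex lock;
	int value;
} example_state = {
	.lock = __MUTEX_INITIALIZER(example_state.lock),
};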
-extern void fastcall __mutex_init(struct mutex *lock, const char *name);
+extern void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
-/***
+/**
* mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
-static inline int fastcall mutex_is_locked(struct mutex *lock)
+static inline int mutex_is_locked(struct mutex *lock)
{
return atomic_read(&lock->count) != 1;
}
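The comparison against 1 works because the count field encodes the lock state in this implementation: 1 means unlocked, 0 locked, and a negative value locked with possible waiters. A hypothetical use, asserting a locking precondition (names are illustrative only):

/* Caller must hold the lock. */
static void example_locked_work(struct mutex *lock)
{
	BUG_ON(!mutex_is_locked(lock));
	/* ... touch state protected by the lock ... */
}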
/*
- * See kernel/mutex.c for detailed documentation of these APIs.
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/mutex-design.txt.
*/
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
+ unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+ unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+
+#define mutex_lock_nest_lock(lock, nest_lock) \
+do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+} while (0)
+
+#else
+extern void mutex_lock(struct mutex *lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock);
+
+# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+#endif
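When two mutexes of the same lock class legitimately nest (say, a parent and a child object of the same type), lockdep needs distinct subclasses to tell the acquisitions apart; otherwise it reports a false deadlock. A hedged sketch, with hypothetical subclass names:

enum { EXAMPLE_SUBCLASS_PARENT, EXAMPLE_SUBCLASS_CHILD };

static void example_lock_pair(struct mutex *parent, struct mutex *child)
{
	mutex_lock_nested(parent, EXAMPLE_SUBCLASS_PARENT);
	mutex_lock_nested(child, EXAMPLE_SUBCLASS_CHILD);
	/* ... both objects now stable ... */
	mutex_unlock(child);
	mutex_unlock(parent);
}

Without CONFIG_DEBUG_LOCK_ALLOC the subclass argument compiles away, per the fallback defines above.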
+
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
-extern int fastcall mutex_trylock(struct mutex *lock);
-extern void fastcall mutex_unlock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
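Since mutex_trylock() returns 1 on success (the spin_trylock() convention, not down_trylock()), the natural pattern is to bail out on a zero return. A sketch with hypothetical names; -EBUSY assumes <linux/errno.h>:

static int example_try_flush(struct mutex *lock)
{
	if (!mutex_trylock(lock))
		return -EBUSY;	/* contended: caller retries later */
	/* ... flush state protected by the lock ... */
	mutex_unlock(lock);
	return 0;
}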
+
+#ifndef arch_mutex_cpu_relax
+# define arch_mutex_cpu_relax() cpu_relax()
#endif
+
+#endif /* __LINUX_MUTEX_H */