Diffstat (limited to 'lib/kernel_lock.c')
-rw-r--r--   lib/kernel_lock.c   20
1 file changed, 14 insertions, 6 deletions
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e352..4ebfa5a164d 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
  * relegated to obsolescence, but used by various less
  * important (or lazy) subsystems.
  */
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/semaphore.h>
+#include <linux/smp_lock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/bkl.h>
 
 /*
  * The 'big kernel lock'
@@ -113,21 +116,26 @@ static inline void __unlock_kernel(void)
  * This cannot happen asynchronously, so we only need to
  * worry about other CPU's.
  */
-void __lockfunc lock_kernel(void)
+void __lockfunc _lock_kernel(const char *func, const char *file, int line)
 {
-	int depth = current->lock_depth+1;
+	int depth = current->lock_depth + 1;
+
+	trace_lock_kernel(func, file, line);
+
 	if (likely(!depth))
 		__lock_kernel();
 	current->lock_depth = depth;
 }
 
-void __lockfunc unlock_kernel(void)
+void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
 {
 	BUG_ON(current->lock_depth < 0);
 	if (likely(--current->lock_depth < 0))
 		__unlock_kernel();
+
+	trace_unlock_kernel(func, file, line);
 }
 
-EXPORT_SYMBOL(lock_kernel);
-EXPORT_SYMBOL(unlock_kernel);
+EXPORT_SYMBOL(_lock_kernel);
+EXPORT_SYMBOL(_unlock_kernel);
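With this change the BKL entry points take the caller's function, file, and line, which the new trace_lock_kernel()/trace_unlock_kernel() tracepoints (instantiated here via CREATE_TRACE_POINTS and trace/events/bkl.h) record, so a trace shows exactly which call site acquired or released the big kernel lock. The header side is not part of this diff; as a rough sketch only, assuming the existing lock_kernel()/unlock_kernel() API is preserved, <linux/smp_lock.h> would be expected to forward the call-site information through wrapper macros along these lines:

/*
 * Illustrative sketch, not taken from this commit: keep the old
 * lock_kernel()/unlock_kernel() names while passing the call site
 * down to the _lock_kernel()/_unlock_kernel() functions shown in
 * the diff above.
 */
extern void __lockfunc
_lock_kernel(const char *func, const char *file, int line);
extern void __lockfunc
_unlock_kernel(const char *func, const char *file, int line);

#define lock_kernel()	_lock_kernel(__func__, __FILE__, __LINE__)
#define unlock_kernel()	_unlock_kernel(__func__, __FILE__, __LINE__)

The tracepoints themselves would be declared in include/trace/events/bkl.h (also not shown in this diff), with CREATE_TRACE_POINTS in this file generating their definitions.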
