Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                       |   19
-rw-r--r--  kernel/audit.c                        |    6
-rw-r--r--  kernel/auditfilter.c                  |    3
-rw-r--r--  kernel/capability.c                   |   21
-rw-r--r--  kernel/cpu.c                          |   24
-rw-r--r--  kernel/cpuset.c                       |   38
-rw-r--r--  kernel/exit.c                         |    1
-rw-r--r--  kernel/fork.c                         |    3
-rw-r--r--  kernel/hrtimer.c                      |    8
-rw-r--r--  kernel/kprobes.c                      |    2
-rw-r--r--  kernel/kthread.c                      |    1
-rw-r--r--  kernel/lockdep.c                      |   80
-rw-r--r--  kernel/lockdep_internals.h            |    6
-rw-r--r--  kernel/lockdep_proc.c                 |   97
-rw-r--r--  kernel/marker.c                       |   30
-rw-r--r--  kernel/mutex-debug.c                  |    2
-rw-r--r--  kernel/mutex.c                        |    5
-rw-r--r--  kernel/pm_qos_params.c                |    7
-rw-r--r--  kernel/printk.c                       |  112
-rw-r--r--  kernel/ptrace.c                       |   15
-rw-r--r--  kernel/rcuclassic.c                   |   16
-rw-r--r--  kernel/rcupreempt.c                   |   20
-rw-r--r--  kernel/sched.c                        |  855
-rw-r--r--  kernel/sched_clock.c                  |  137
-rw-r--r--  kernel/sched_cpupri.c                 |  174
-rw-r--r--  kernel/sched_cpupri.h                 |   36
-rw-r--r--  kernel/sched_debug.c                  |   64
-rw-r--r--  kernel/sched_fair.c                   |  413
-rw-r--r--  kernel/sched_features.h               |    7
-rw-r--r--  kernel/sched_rt.c                     |  405
-rw-r--r--  kernel/sched_stats.h                  |   42
-rw-r--r--  kernel/semaphore.c                    |    1
-rw-r--r--  kernel/softirq.c                      |    2
-rw-r--r--  kernel/softlockup.c                   |    1
-rw-r--r--  kernel/spinlock.c                     |    2
-rw-r--r--  kernel/stop_machine.c                 |    2
-rw-r--r--  kernel/sysctl.c                       |   19
-rw-r--r--  kernel/time/tick-broadcast.c          |    6
-rw-r--r--  kernel/time/tick-sched.c              |    2
-rw-r--r--  kernel/trace/Kconfig                  |  135
-rw-r--r--  kernel/trace/Makefile                 |   24
-rw-r--r--  kernel/trace/ftrace.c                 | 1727
-rw-r--r--  kernel/trace/trace.c                  | 3161
-rw-r--r--  kernel/trace/trace.h                  |  339
-rw-r--r--  kernel/trace/trace_functions.c        |   81
-rw-r--r--  kernel/trace/trace_irqsoff.c          |  486
-rw-r--r--  kernel/trace/trace_mmiotrace.c        |  295
-rw-r--r--  kernel/trace/trace_sched_switch.c     |  286
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     |  448
-rw-r--r--  kernel/trace/trace_selftest.c         |  563
-rw-r--r--  kernel/trace/trace_selftest_dynamic.c |    7
-rw-r--r--  kernel/trace/trace_sysprof.c          |  363
-rw-r--r--  kernel/workqueue.c                    |    2
53 files changed, 9907 insertions, 694 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 1c9938addb9..f6328e16dfd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -3,7 +3,7 @@
#
obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
- exit.o itimer.o time.o softirq.o resource.o \
+ cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
@@ -11,6 +11,18 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o
+CFLAGS_REMOVE_sched.o = -mno-spe
+
+ifdef CONFIG_FTRACE
+# Do not trace debug files and internal ftrace files
+CFLAGS_REMOVE_lockdep.o = -pg
+CFLAGS_REMOVE_lockdep_proc.o = -pg
+CFLAGS_REMOVE_mutex-debug.o = -pg
+CFLAGS_REMOVE_rtmutex-debug.o = -pg
+CFLAGS_REMOVE_cgroup-debug.o = -pg
+CFLAGS_REMOVE_sched_clock.o = -pg
+endif
+
obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
@@ -27,7 +39,7 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_SMP) += cpu.o spinlock.o
+obj-$(CONFIG_SMP) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_UID16) += uid16.o
@@ -69,6 +81,9 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
obj-$(CONFIG_MARKERS) += marker.o
obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_FTRACE) += trace/
+obj-$(CONFIG_TRACING) += trace/
+obj-$(CONFIG_SMP) += sched_cpupri.o
ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
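
A note on the CFLAGS_REMOVE_*.o = -pg lines above: -pg makes gcc emit an mcount() call at every function entry, which ftrace hooks, and lockdep, the mutex/rtmutex debug code and sched_clock are themselves called from inside the tracer, so instrumenting them would recurse. A minimal per-function sketch of the same idea, assuming the notrace annotation (no_instrument_function) that the ftrace series introduces:

/* Built without mcount instrumentation even when -pg is in the CFLAGS. */
static notrace int internal_helper(int x)
{
        return x * 2;   /* never enters the function tracer */
}
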
diff --git a/kernel/audit.c b/kernel/audit.c
index e8692a5748c..e092f1c0ce3 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -738,7 +738,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (!audit_enabled && msg_type != AUDIT_USER_AVC)
return 0;
- err = audit_filter_user(&NETLINK_CB(skb), msg_type);
+ err = audit_filter_user(&NETLINK_CB(skb));
if (err == 1) {
err = 0;
if (msg_type == AUDIT_USER_TTY) {
@@ -779,7 +779,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
/* fallthrough */
case AUDIT_LIST:
- err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+ err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
uid, seq, data, nlmsg_len(nlh),
loginuid, sessionid, sid);
break;
@@ -798,7 +798,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
/* fallthrough */
case AUDIT_LIST_RULES:
- err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
+ err = audit_receive_filter(msg_type, NETLINK_CB(skb).pid,
uid, seq, data, nlmsg_len(nlh),
loginuid, sessionid, sid);
break;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 0e0bd27e651..98c50cc671b 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1544,6 +1544,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
* @data: payload data
* @datasz: size of payload data
* @loginuid: loginuid of sender
+ * @sessionid: sessionid for netlink audit message
* @sid: SE Linux Security ID of sender
*/
int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
@@ -1720,7 +1721,7 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
return 1;
}
-int audit_filter_user(struct netlink_skb_parms *cb, int type)
+int audit_filter_user(struct netlink_skb_parms *cb)
{
enum audit_state state = AUDIT_DISABLED;
struct audit_entry *e;
diff --git a/kernel/capability.c b/kernel/capability.c
index cfbe4429948..901e0fdc3ff 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -121,6 +121,27 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
* uninteresting and/or not to be changed.
*/
+/*
+ * Atomically modify the effective capabilities returning the original
+ * value. No permission check is performed here - it is assumed that the
+ * caller is permitted to set the desired effective capabilities.
+ */
+kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
+{
+ kernel_cap_t pE_old;
+
+ spin_lock(&task_capability_lock);
+
+ pE_old = current->cap_effective;
+ current->cap_effective = pE_new;
+
+ spin_unlock(&task_capability_lock);
+
+ return pE_old;
+}
+
+EXPORT_SYMBOL(cap_set_effective);
+
/**
* sys_capget - get the capabilities of a given process.
* @header: pointer to struct that contains capability version and
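
A usage sketch for the cap_set_effective() helper added above: a caller temporarily raises one capability in its effective set and restores the saved set afterwards. This is a hypothetical caller, not part of the patch; cap_raise() and CAP_SYS_ADMIN are the usual <linux/capability.h> names.

#include <linux/capability.h>
#include <linux/sched.h>

/* Hypothetical caller: run one privileged step with CAP_SYS_ADMIN raised. */
static void do_privileged_step(void)
{
        kernel_cap_t saved, raised;

        raised = current->cap_effective;
        cap_raise(raised, CAP_SYS_ADMIN);

        saved = cap_set_effective(raised);      /* returns the previous effective set */
        /* ... privileged operation ... */
        cap_set_effective(saved);               /* restore the original capabilities */
}
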
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c77bc3a1c72..b11f06dc149 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,6 +15,28 @@
#include <linux/stop_machine.h>
#include <linux/mutex.h>
+/*
+ * Represents all CPUs present in the system.
+ * In systems capable of hotplug, this map can grow dynamically
+ * as new CPUs are detected in the system via any platform-specific
+ * method, such as ACPI.
+ */
+cpumask_t cpu_present_map __read_mostly;
+EXPORT_SYMBOL(cpu_present_map);
+
+#ifndef CONFIG_SMP
+
+/*
+ * Represents all CPUs that are currently online.
+ */
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_online_map);
+
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
+
+#else /* CONFIG_SMP */
+
/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);
@@ -403,3 +425,5 @@ out:
cpu_maps_update_done();
}
#endif /* CONFIG_PM_SLEEP_SMP */
+
+#endif /* CONFIG_SMP */
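
With the cpu_*_map definitions now living in kernel/cpu.c (and provided even for !CONFIG_SMP builds), callers can walk them the same way on UP and SMP kernels. A small sketch using only the standard <linux/cpumask.h> iterator; the helper name is illustrative.

#include <linux/cpumask.h>

/* Count the CPUs the platform has reported as present. */
static int count_present_cpus(void)
{
        int cpu, n = 0;

        for_each_present_cpu(cpu)       /* walks cpu_present_map */
                n++;
        return n;
}
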
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9fceb97e989..459d601947a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1194,6 +1194,15 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
+ if (tsk->flags & PF_THREAD_BOUND) {
+ cpumask_t mask;
+
+ mutex_lock(&callback_mutex);
+ mask = cs->cpus_allowed;
+ mutex_unlock(&callback_mutex);
+ if (!cpus_equal(tsk->cpus_allowed, mask))
+ return -EINVAL;
+ }
return security_task_setscheduler(tsk, 0, NULL);
}
@@ -1207,11 +1216,14 @@ static void cpuset_attach(struct cgroup_subsys *ss,
struct mm_struct *mm;
struct cpuset *cs = cgroup_cs(cont);
struct cpuset *oldcs = cgroup_cs(oldcont);
+ int err;
mutex_lock(&callback_mutex);
guarantee_online_cpus(cs, &cpus);
- set_cpus_allowed_ptr(tsk, &cpus);
+ err = set_cpus_allowed_ptr(tsk, &cpus);
mutex_unlock(&callback_mutex);
+ if (err)
+ return;
from = oldcs->mems_allowed;
to = cs->mems_allowed;
@@ -1882,7 +1894,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
* in order to minimize text size.
*/
-static void common_cpu_mem_hotplug_unplug(void)
+static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
{
cgroup_lock();
@@ -1894,7 +1906,8 @@ static void common_cpu_mem_hotplug_unplug(void)
* Scheduler destroys domains on hotplug events.
* Rebuild them based on the current settings.
*/
- rebuild_sched_domains();
+ if (rebuild_sd)
+ rebuild_sched_domains();
cgroup_unlock();
}
@@ -1912,11 +1925,22 @@ static void common_cpu_mem_hotplug_unplug(void)
static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
unsigned long phase, void *unused_cpu)
{
- if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
+ switch (phase) {
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ common_cpu_mem_hotplug_unplug(1);
+ break;
+ default:
return NOTIFY_DONE;
+ }
- common_cpu_mem_hotplug_unplug();
- return 0;
+ return NOTIFY_OK;
}
#ifdef CONFIG_MEMORY_HOTPLUG
@@ -1929,7 +1953,7 @@ static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
void cpuset_track_online_nodes(void)
{
- common_cpu_mem_hotplug_unplug();
+ common_cpu_mem_hotplug_unplug(0);
}
#endif
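
The reworked cpuset_handle_cpuhp() above follows the usual hotplug-notifier convention: handle only the phases of interest and return NOTIFY_DONE for everything else. A minimal sketch of that pattern; the callback and notifier_block names are illustrative, the constants are the standard ones from <linux/notifier.h> and <linux/cpu.h>.

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_ONLINE:
        case CPU_DEAD:
                /* react to the topology change here */
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;     /* phase we do not care about */
        }
}

static struct notifier_block example_cpu_nb = {
        .notifier_call = example_cpu_callback,
};
/* registered during init with register_cpu_notifier(&example_cpu_nb); */
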
diff --git a/kernel/exit.c b/kernel/exit.c
index 8f6185e69b6..ceb25878283 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -13,6 +13,7 @@
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
+#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index 19908b26cf8..4bd2f516401 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,6 +23,7 @@
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
+#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
@@ -909,7 +910,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
rt_mutex_init_task(p);
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 861b4088092..e0da4fc9f93 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1003,10 +1003,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
*/
raise = timer->state == HRTIMER_STATE_PENDING;
+ /*
+ * We use preempt_disable to prevent this task from migrating after
+ * setting up the softirq and raising it. Otherwise, if we migrate
+ * we will raise the softirq on the wrong CPU.
+ */
+ preempt_disable();
+
unlock_hrtimer_base(timer, &flags);
if (raise)
hrtimer_raise_softirq();
+ preempt_enable();
return ret;
}
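
The hrtimer change above brackets the softirq raise with preempt_disable()/preempt_enable() so the task cannot migrate between dropping the base lock and raising the softirq. The same pattern in isolation, as a sketch; HRTIMER_SOFTIRQ is assumed to be the softirq in use, as in this kernel's <linux/interrupt.h>.

#include <linux/preempt.h>
#include <linux/interrupt.h>

/* Hypothetical helper: raise a softirq and guarantee it fires on this CPU. */
static void raise_softirq_on_this_cpu(void)
{
        preempt_disable();              /* pin the task to the current CPU */
        raise_softirq(HRTIMER_SOFTIRQ); /* pending bit is set on this CPU */
        preempt_enable();
}
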
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d4998f81e22..1485ca8d0e0 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -79,7 +79,7 @@ static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
*
* For such cases, we now have a blacklist
*/
-struct kprobe_blackpoint kprobe_blacklist[] = {
+static struct kprobe_blackpoint kprobe_blacklist[] = {
{"preempt_schedule",},
{NULL} /* Terminator */
};
diff --git a/kernel/kthread.c b/kernel/kthread.c
index bd1b9ea024e..97747cdd37c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,6 +180,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
set_task_cpu(k, cpu);
k->cpus_allowed = cpumask_of_cpu(cpu);
k->rt.nr_cpus_allowed = 1;
+ k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 81a4e4a3f08..d38a6436297 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -39,6 +39,7 @@
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
+#include <linux/ftrace.h>
#include <asm/sections.h>
@@ -81,6 +82,8 @@ static int graph_lock(void)
__raw_spin_unlock(&lockdep_lock);
return 0;
}
+ /* prevent any recursions within lockdep from causing deadlocks */
+ current->lockdep_recursion++;
return 1;
}
@@ -89,6 +92,7 @@ static inline int graph_unlock(void)
if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
+ current->lockdep_recursion--;
__raw_spin_unlock(&lockdep_lock);
return 0;
}
@@ -982,7 +986,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
return 1;
}
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
* Forwards and backwards subgraph searching, for the purposes of
* proving that two subgraphs can be connected by a new dependency
@@ -1458,7 +1462,14 @@ out_bug:
}
unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+int nr_chain_hlocks;
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
+{
+ return lock_classes + chain_hlocks[chain->base + i];
+}
/*
* Look up a dependency chain. If the key is not present yet then
@@ -1466,10 +1477,15 @@ static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
* validated. If the key is already hashed, return 0.
* (On return with 1 graph_lock is held.)
*/
-static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
+static inline int lookup_chain_cache(struct task_struct *curr,
+ struct held_lock *hlock,
+ u64 chain_key)
{
+ struct lock_class *class = hlock->class;
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
+ struct held_lock *hlock_curr, *hlock_next;
+ int i, j, n, cn;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
@@ -1517,6 +1533,32 @@ cache_hit:
}
chain = lock_chains + nr_lock_chains++;
chain->chain_key = chain_key;
+ chain->irq_context = hlock->irq_context;
+ /* Find the first held_lock of current chain */
+ hlock_next = hlock;
+ for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+ hlock_curr = curr->held_locks + i;
+ if (hlock_curr->irq_context != hlock_next->irq_context)
+ break;
+ hlock_next = hlock;
+ }
+ i++;
+ chain->depth = curr->lockdep_depth + 1 - i;
+ cn = nr_chain_hlocks;
+ while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
+ n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
+ if (n == cn)
+ break;
+ cn = n;
+ }
+ if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ chain->base = cn;
+ for (j = 0; j < chain->depth - 1; j++, i++) {
+ int lock_id = curr->held_locks[i].class - lock_classes;
+ chain_hlocks[chain->base + j] = lock_id;
+ }
+ chain_hlocks[chain->base + j] = class - lock_classes;
+ }
list_add_tail_rcu(&chain->entry, hash_head);
debug_atomic_inc(&chain_lookup_misses);
inc_chains();
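
The nr_chain_hlocks update in the hunk above reserves chain->depth slots from the shared chain_hlocks[] array with a lock-free cmpxchg() retry loop. A standalone userspace illustration of that reservation pattern, using GCC's __atomic builtins in place of the kernel's cmpxchg(); all names here are illustrative.

#include <stdio.h>

#define MAX_SLOTS 1024

static int next_slot;   /* shared counter, plays the role of nr_chain_hlocks */

static int reserve_slots(int depth)
{
        int cur = __atomic_load_n(&next_slot, __ATOMIC_RELAXED);

        while (cur + depth <= MAX_SLOTS) {
                /* try to advance next_slot from cur to cur + depth */
                if (__atomic_compare_exchange_n(&next_slot, &cur, cur + depth,
                                                0, __ATOMIC_RELAXED,
                                                __ATOMIC_RELAXED))
                        return cur;     /* we own slots [cur, cur + depth) */
                /* cur now holds the updated value; retry */
        }
        return -1;                      /* table full */
}

int main(void)
{
        printf("claimed base %d\n", reserve_slots(5));
        return 0;
}
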
@@ -1538,7 +1580,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
- lookup_chain_cache(chain_key, hlock->class)) {
+ lookup_chain_cache(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
*
@@ -1680,7 +1722,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit);
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
* print irq inversion bug:
@@ -2013,11 +2055,13 @@ void early_boot_irqs_on(void)
/*
* Hardirqs will be enabled:
*/
-void trace_hardirqs_on(void)
+void trace_hardirqs_on_caller(unsigned long a0)
{
struct task_struct *curr = current;
unsigned long ip;
+ time_hardirqs_on(CALLER_ADDR0, a0);
+
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -2055,16 +2099,23 @@ void trace_hardirqs_on(void)
curr->hardirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_on_events);
}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+void trace_hardirqs_on(void)
+{
+ trace_hardirqs_on_caller(CALLER_ADDR0);
+}
EXPORT_SYMBOL(trace_hardirqs_on);
/*
* Hardirqs were disabled:
*/
-void trace_hardirqs_off(void)
+void trace_hardirqs_off_caller(unsigned long a0)
{
struct task_struct *curr = current;
+ time_hardirqs_off(CALLER_ADDR0, a0);
+
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
@@ -2082,7 +2133,12 @@ void trace_hardirqs_off(void)
} else
debug_atomic_inc(&redundant_hardirqs_off);
}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+void trace_hardirqs_off(void)
+{
+ trace_hardirqs_off_caller(CALLER_ADDR0);
+}
EXPORT_SYMBOL(trace_hardirqs_off);
/*
@@ -2246,7 +2302,7 @@ static inline int separate_irq_context(struct task_struct *curr,
* Mark a lock with a usage bit, and validate the state transition:
*/
static int mark_lock(struct task_struct *curr, struct held_lock *this,
- enum lock_usage_bit new_bit)
+ enum lock_usage_bit new_bit)
{
unsigned int new_mask = 1 << new_bit, ret = 1;
@@ -2650,7 +2706,8 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
*/
static void check_flags(unsigned long flags)
{
-#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
+ defined(CONFIG_TRACE_IRQFLAGS)
if (!debug_locks)
return;
@@ -2686,7 +2743,7 @@ static void check_flags(unsigned long flags)
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check, unsigned long ip)
+ int trylock, int read, int check, unsigned long ip)
{
unsigned long flags;
@@ -2708,7 +2765,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
EXPORT_SYMBOL_GPL(lock_acquire);
-void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip)
{
unsigned long flags;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 8ce09bc4613..c3600a091a2 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -23,6 +23,8 @@
#define MAX_LOCKDEP_CHAINS_BITS 14
#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
+
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the hash_lock.
@@ -30,15 +32,19 @@
#define MAX_STACK_TRACE_ENTRIES 262144UL
extern struct list_head all_lock_classes;
+extern struct lock_chain lock_chains[];
extern void
get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
+struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
+
extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
+extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;
extern unsigned int nr_hardirq_chains;
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index dc5d29648d8..9b0e940e254 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -139,7 +139,7 @@ static int l_show(struct seq_file *m, void *v)
list_for_each_entry(entry, &class->locks_after, entry) {
if (entry->distance == 1) {
- seq_printf(m, " -> [%p] ", entry->class);
+ seq_printf(m, " -> [%p] ", entry->class->key);
print_name(m, entry->class);
seq_puts(m, "\n");
}
@@ -178,6 +178,95 @@ static const struct file_operations proc_lockdep_operations = {
.release = seq_release,
};
+#ifdef CONFIG_PROVE_LOCKING
+static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct lock_chain *chain;
+
+ (*pos)++;
+
+ if (v == SEQ_START_TOKEN)
+ chain = m->private;
+ else {
+ chain = v;
+
+ if (*pos < nr_lock_chains)
+ chain = lock_chains + *pos;
+ else
+ chain = NULL;
+ }
+
+ return chain;
+}
+
+static void *lc_start(struct seq_file *m, loff_t *pos)
+{
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos < nr_lock_chains)
+ return lock_chains + *pos;
+
+ return NULL;
+}
+
+static void lc_stop(struct seq_file *m, void *v)
+{
+}
+
+static int lc_show(struct seq_file *m, void *v)
+{
+ struct lock