-rw-r--r--   kernel/rcutree.c        | 13
-rw-r--r--   kernel/rcutree.h        |  2
-rw-r--r--   kernel/rcutree_plugin.h | 13
3 files changed, 19 insertions, 9 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 9970116163b..ade788320dd 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -545,6 +545,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	int cpu;
 	long delta;
 	unsigned long flags;
+	int ndetected;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	/* Only let one CPU complain about others per time interval. */
@@ -561,7 +562,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	 * Now rat on any tasks that got kicked up to the root rcu_node
 	 * due to CPU offlining.
 	 */
-	rcu_print_task_stall(rnp);
+	ndetected = rcu_print_task_stall(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/*
@@ -573,17 +574,21 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	       rsp->name);
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rcu_print_task_stall(rnp);
+		ndetected += rcu_print_task_stall(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		if (rnp->qsmask == 0)
 			continue;
 		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-			if (rnp->qsmask & (1UL << cpu))
+			if (rnp->qsmask & (1UL << cpu)) {
 				printk(" %d", rnp->grplo + cpu);
+				ndetected++;
+			}
 	}
 	printk("} (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
-	if (!trigger_all_cpu_backtrace())
+	if (ndetected == 0)
+		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+	else if (!trigger_all_cpu_backtrace())
 		dump_stack();
 
 	/* If so configured, complain about tasks blocking the grace period. */
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 51638b68b2d..f509f728f9f 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -438,7 +438,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
-static void rcu_print_task_stall(struct rcu_node *rnp);
+static int rcu_print_task_stall(struct rcu_node *rnp);
 static void rcu_preempt_stall_reset(void);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index eeb38ee8ebb..d3127e8764c 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -483,16 +483,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
 	struct task_struct *t;
+	int ndetected = 0;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
-		return;
+		return 0;
 	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 		printk(" P%d", t->pid);
+		ndetected++;
+	}
+	return ndetected;
 }
 
 /*
@@ -976,8 +980,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+	return 0;
 }
 
 /*
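
In short, this change makes rcu_print_task_stall() return the number of stalled tasks it printed, has print_other_cpu_stall() add one to that total for each stalled CPU it reports, and skips the backtrace dump when the total is zero, i.e. when the stall ended before the state dump started. Below is a minimal user-space sketch of that counting pattern; the names are hypothetical and printf() stands in for printk(), so this is only an illustration of the logic in the diff, not kernel code.

#include <stdio.h>

/*
 * Hypothetical stand-in for rcu_print_task_stall(): print each stalled
 * task and return how many were printed so the caller can total them.
 */
static int print_stalled_tasks(const int *task_pids, int ntasks)
{
	int ndetected = 0;
	int i;

	for (i = 0; i < ntasks; i++) {
		printf(" P%d", task_pids[i]);
		ndetected++;
	}
	return ndetected;
}

/*
 * Hypothetical stand-in for print_other_cpu_stall(): sum the tasks and
 * CPUs that were reported, and suppress the (expensive) backtrace dump
 * when nothing was detected because the stall already ended.
 */
static void print_stall_report(const int *task_pids, int ntasks,
			       const int *stalled_cpus, int ncpus)
{
	int ndetected;
	int i;

	printf("INFO: detected stalls on CPUs/tasks: {");
	ndetected = print_stalled_tasks(task_pids, ntasks);
	for (i = 0; i < ncpus; i++) {
		printf(" %d", stalled_cpus[i]);
		ndetected++;
	}
	printf(" }\n");

	if (ndetected == 0)
		printf("INFO: Stall ended before state dump start\n");
	else
		printf("(would dump stacks for %d offenders here)\n", ndetected);
}

int main(void)
{
	int pids[] = { 42, 1337 };
	int cpus[] = { 3 };

	print_stall_report(pids, 2, cpus, 1);	/* offenders still present */
	print_stall_report(NULL, 0, NULL, 0);	/* stall already ended */
	return 0;
}

Returning a count from the reporting helper lets the caller distinguish "offenders found" from "nothing left to report" with no extra state, so the all-CPU NMI backtraces are only triggered when they would actually show something useful.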