author	Paul E. McKenney <paul.mckenney@linaro.org>	2011-05-25 13:42:06 -0700
committer	Ingo Molnar <mingo@elte.hu>	2011-05-28 17:41:56 +0200
commit	cc3ce5176d83cd8ae1134f86e208ea758d6cb78e (patch)
tree	a28ada4c1e5fd8c3e8210ca1a1df950af8a704ff
parent	08bca60a6912ad225254250c0a9c3a05b4152cfa (diff)
rcu: Start RCU kthreads in TASK_INTERRUPTIBLE state
Upon creation, kthreads are in TASK_UNINTERRUPTIBLE state, which can
result in softlockup warnings.  Because some of RCU's kthreads can
legitimately be idle indefinitely, start them in TASK_INTERRUPTIBLE
state in order to avoid those warnings.

Suggested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
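For context, the spawn pattern this patch adjusts looks roughly like the sketch
below. This is a simplified illustration, not the RCU code itself: the function
names rcu_demo_kthread() and spawn_demo_kthread() and the "rcudemo/%d" name are
hypothetical, while kthread_create(), kthread_bind(), set_task_state() and
set_current_state() are the kthread/scheduler APIs of the 2.6.39-era kernel.
A kthread returned by kthread_create() sleeps in TASK_UNINTERRUPTIBLE until its
first wake_up_process(); moving it to TASK_INTERRUPTIBLE right after creation is
what keeps an indefinitely idle thread from triggering softlockup warnings.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int rcu_demo_kthread(void *arg)
{
	/* Idle loop: sleep interruptibly until woken or asked to stop. */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);
		/* ... process the pending work here ... */
	}
	return 0;
}

static int spawn_demo_kthread(int cpu)
{
	struct task_struct *t;

	t = kthread_create(rcu_demo_kthread, NULL, "rcudemo/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);
	/*
	 * The fix illustrated here: the freshly created kthread sits in
	 * TASK_UNINTERRUPTIBLE until its first wake_up_process(), so a
	 * thread that may stay idle indefinitely would otherwise trip
	 * softlockup warnings.  Mark it TASK_INTERRUPTIBLE instead.
	 */
	set_task_state(t, TASK_INTERRUPTIBLE);
	/* wake_up_process(t) is called later, when there is work to do. */
	return 0;
}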
-rw-r--r--	kernel/rcutree.c	2
-rw-r--r--	kernel/rcutree_plugin.h	1
2 files changed, 3 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 05e254e930e..77a7671dd14 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1648,6 +1648,7 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	kthread_bind(t, cpu);
+	set_task_state(t, TASK_INTERRUPTIBLE);
 	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
 	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
 	per_cpu(rcu_cpu_kthread_task, cpu) = t;
@@ -1755,6 +1756,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	set_task_state(t, TASK_INTERRUPTIBLE);
 	rnp->node_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = 99;
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 049f2787a98..a767b7dac36 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1295,6 +1295,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
+	set_task_state(t, TASK_INTERRUPTIBLE);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	sp.sched_priority = RCU_KTHREAD_PRIO;