path: root/kernel/rcutree.c
author     Ingo Molnar <mingo@kernel.org>    2013-07-01 11:16:54 +0200
committer  Ingo Molnar <mingo@kernel.org>    2013-07-01 11:18:53 +0200
commit     2fd1b487884310d0aa0c0640179dc7490ad86313 (patch)
tree       1083dce15bd7dc0858c3883b8a361242046c5e09 /kernel/rcutree.c
parent     333bb864f192015a53b5060b829089decd0220ef (diff)
parent     8bb495e3f02401ee6f76d1b1d77f3ac9f079e376 (diff)
Merge tag 'v3.10' into sched/core
Merge in a recent upstream commit:

  c2853c8df57f include/linux/math64.h: add div64_ul()

because:

  72a4cf20cb71 sched: Change cfs_rq load avg to unsigned long

relies on it.

[ We don't rebase sched/core for this, because the handful of
  followup commits after the broken commit are not behavioral
  changes so are unlikely to be needed during bisection. ]

Signed-off-by: Ingo Molnar <mingo@kernel.org>
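(For context on the dependency above: div64_ul() is the math64.h helper for dividing a 64-bit value by an unsigned long, something plain C division cannot do on 32-bit kernels without libgcc helpers. The sketch below only illustrates the helper's intended use; the function avg_per_entity() and its arguments are invented for this example and are not taken from the sched/fair.c change itself.)

#include <linux/math64.h>

/*
 * Illustrative sketch only -- not the actual sched/fair.c code.
 * div64_ul() divides a u64 dividend by an unsigned long divisor,
 * which is the operation the unsigned-long load-average change
 * relies on when built for 32-bit.
 */
static u64 avg_per_entity(u64 total_load_sum, unsigned long nr_entities)
{
	if (!nr_entities)
		return 0;
	return div64_ul(total_load_sum, nr_entities);
}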
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 16ea6792501..35380019f0f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1451,9 +1451,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
 					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irq(&rnp->lock);
 #ifdef CONFIG_PROVE_RCU_DELAY
-		if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 &&
+		if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
 		    system_state == SYSTEM_RUNNING)
-			schedule_timeout_uninterruptible(2);
+			udelay(200);
 #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
 		cond_resched();
 	}
@@ -1613,6 +1613,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	}
 }
 
+static void rsp_wakeup(struct irq_work *work)
+{
+	struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
+
+	/* Wake up rcu_gp_kthread() to start the grace period. */
+	wake_up(&rsp->gp_wq);
+}
+
 /*
  * Start a new RCU grace period if warranted, re-initializing the hierarchy
  * in preparation for detecting the next grace period.  The caller must hold
@@ -1637,8 +1645,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 	}
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
 
-	/* Wake up rcu_gp_kthread() to start the grace period. */
-	wake_up(&rsp->gp_wq);
+	/*
+	 * We can't do wakeups while holding the rnp->lock, as that
+	 * could cause possible deadlocks with the rq->lock. Defer
+	 * the wakeup to interrupt context.
+	 */
+	irq_work_queue(&rsp->wakeup_work);
 }
 
 /*
@@ -3235,6 +3247,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 	rsp->rda = rda;
 	init_waitqueue_head(&rsp->gp_wq);
+	init_irq_work(&rsp->wakeup_work, rsp_wakeup);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
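
The rcutree.c side of this merge is an instance of a general pattern: a wake_up() that must not be issued while a raw spinlock is held (here rnp->lock, because the wakeup path can take rq->lock) is deferred to hard-irq context through irq_work. Below is a minimal, self-contained sketch of that pattern; the names my_state, my_wakeup(), my_init() and my_kick() are invented for illustration and are not part of the RCU code.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_state {
	raw_spinlock_t		lock;		/* held while deciding to wake */
	wait_queue_head_t	wq;		/* the kthread sleeps here */
	struct irq_work		wakeup_work;	/* carries the deferred wakeup */
};

/* Runs from the irq_work path, i.e. after ->lock has been dropped. */
static void my_wakeup(struct irq_work *work)
{
	struct my_state *st = container_of(work, struct my_state, wakeup_work);

	wake_up(&st->wq);
}

static void my_init(struct my_state *st)
{
	raw_spin_lock_init(&st->lock);
	init_waitqueue_head(&st->wq);
	init_irq_work(&st->wakeup_work, my_wakeup);
}

static void my_kick(struct my_state *st)
{
	raw_spin_lock_irq(&st->lock);
	/*
	 * Calling wake_up() here could take rq->lock while ->lock is
	 * held; queue the irq_work instead and let my_wakeup() run
	 * once the lock is no longer held.
	 */
	irq_work_queue(&st->wakeup_work);
	raw_spin_unlock_irq(&st->lock);
}

In the actual diff above, the same three pieces appear as rsp_wakeup(), init_irq_work(&rsp->wakeup_work, rsp_wakeup) in rcu_init_one(), and irq_work_queue(&rsp->wakeup_work) in rcu_start_gp_advanced().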