author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-04-07 22:47:23 -0700
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-05-05 23:16:59 -0700
commit		2655d57ef35aa327a2e58a1c5dc7b65c65003f4e (patch)
tree		4a71e82b23ee84705cad15cd16490f8c5aace74f /kernel/rcutree.c
parent		baa1ae0c9f1c618bc60706efa75fef3508bcee58 (diff)
rcu: prevent call_rcu() from diving into rcu core if irqs disabled
This commit marks a first step towards making call_rcu() have real-time
behavior.  If irqs are disabled, don't dive into the RCU core.  Later on,
this new early exit will wake up the per-CPU kthread, which first must be
modified to handle the cases involving callback storms.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
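For context: __call_rcu() saves the caller's interrupt state with local_irq_save()
before enqueuing the callback, which is what makes the irqs_disabled_flags(flags)
test in the hunk below possible.  The following is a minimal, simplified sketch of
that pattern, not the actual __call_rcu() body; the helper name
sketch_call_rcu_enqueue() and the elided bookkeeping are illustrative only.

#include <linux/irqflags.h>	/* local_irq_save(), irqs_disabled_flags() */

/* Hypothetical helper illustrating the early-exit pattern added here. */
static void sketch_call_rcu_enqueue(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* __call_rcu() saves the irq state up front */

	/* ... enqueue the callback on the per-CPU list and bump ->qlen ... */

	/*
	 * If the caller already had interrupts disabled, skip the
	 * grace-period bookkeeping entirely; a later change is planned
	 * to hand that work to the per-CPU kthread instead.
	 */
	if (irqs_disabled_flags(flags)) {
		local_irq_restore(flags);
		return;
	}

	/* ... otherwise compare ->qlen against qhimark and maybe force a GP ... */
	local_irq_restore(flags);
}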
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index fe85600ba8c..78923a50cdb 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1839,6 +1839,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	/* Add the callback to our list. */
 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+	rdp->qlen++;
+
+	/* If interrupts were disabled, don't dive into RCU core. */
+	if (irqs_disabled_flags(flags)) {
+		local_irq_restore(flags);
+		return;
+	}
 
 	/*
 	 * Force the grace period if too many callbacks or too long waiting.
@@ -1847,7 +1854,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * invoking force_quiescent_state() if the newly enqueued callback
 	 * is the only one waiting for a grace period to complete.
 	 */
-	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		/* Are we ignoring a completed grace period? */
 		rcu_process_gp_end(rsp, rdp);
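As a usage illustration (not part of the patch), a caller that invokes call_rcu()
with interrupts disabled, for example from inside a spin_lock_irqsave() critical
section, is exactly the case the new early exit short-circuits.  All names below
(struct item, example_lock, item_free_cb(), example_remove()) are made up for
this sketch.

#include <linux/kernel.h>	/* container_of() */
#include <linux/rcupdate.h>	/* call_rcu(), struct rcu_head */
#include <linux/slab.h>		/* kfree() */
#include <linux/spinlock.h>

struct item {
	struct rcu_head rh;
	/* ... payload ... */
};

static DEFINE_SPINLOCK(example_lock);

static void item_free_cb(struct rcu_head *rh)
{
	kfree(container_of(rh, struct item, rh));
}

/*
 * call_rcu() runs with interrupts disabled here, so with this patch the
 * enqueue path returns early instead of doing grace-period processing.
 */
static void example_remove(struct item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... unlink 'it' from the protected data structure ... */
	call_rcu(&it->rh, item_free_cb);
	spin_unlock_irqrestore(&example_lock, flags);
}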