From 7a497c963eceac42677ce1f5d7bb470abedd15f4 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 22 Aug 2013 18:16:16 -0700
Subject: rcu: Remove redundant code from rcu_cleanup_after_idle()

The rcu_try_advance_all_cbs() function returns a bool saying whether or
not there are callbacks ready to invoke, but rcu_cleanup_after_idle()
rechecks this regardless.  This commit therefore uses the value returned
by rcu_try_advance_all_cbs() instead of making rcu_cleanup_after_idle()
do this recheck.

Reported-by: Tibor Billes
Signed-off-by: Paul E. McKenney
Tested-by: Tibor Billes
Reviewed-by: Josh Triplett
---
 kernel/rcutree_plugin.h | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

(limited to 'kernel')

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 130c97b027f..18d9c91f25d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1768,17 +1768,11 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-	struct rcu_data *rdp;
-	struct rcu_state *rsp;
 
 	if (rcu_is_nocb_cpu(cpu))
 		return;
-	rcu_try_advance_all_cbs();
-	for_each_rcu_flavor(rsp) {
-		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (cpu_has_callbacks_ready_to_invoke(rdp))
-			invoke_rcu_core();
-	}
+	if (rcu_try_advance_all_cbs())
+		invoke_rcu_core();
 }
 
 /*
-- 
cgit v1.2.3-18-g5258

From c229828ca6bc62d6c654f64b1d1b8a9ebd8a56f3 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Sun, 25 Aug 2013 21:20:47 -0700
Subject: rcu: Throttle rcu_try_advance_all_cbs() execution

The rcu_try_advance_all_cbs() function is invoked on each attempted
entry to and every exit from idle.  If this function determines that
there are callbacks ready to invoke, the caller will invoke the RCU
core, which in turn will result in a pair of context switches.  If a
CPU enters and exits idle extremely frequently, this can result in an
excessive number of context switches and high CPU overhead.  This
commit therefore causes rcu_try_advance_all_cbs() to throttle itself,
refusing to do work more than once per jiffy.

Reported-by: Tibor Billes
Signed-off-by: Paul E. McKenney
Tested-by: Tibor Billes
Reviewed-by: Josh Triplett
---
 kernel/rcutree.h        |  2 ++
 kernel/rcutree_plugin.h | 12 +++++++++---
 2 files changed, 11 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5f97eab602c..52be957c9fe 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -104,6 +104,8 @@ struct rcu_dynticks {
 				    /* idle-period nonlazy_posted snapshot. */
 	unsigned long last_accelerate;
 				    /* Last jiffy CBs were accelerated. */
+	unsigned long last_advance_all;
+				    /* Last jiffy CBs were all advanced. */
 	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 };
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 18d9c91f25d..d81e3856fa9 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1630,17 +1630,23 @@ module_param(rcu_idle_lazy_gp_delay, int, 0644);
 extern int tick_nohz_enabled;
 
 /*
- * Try to advance callbacks for all flavors of RCU on the current CPU.
- * Afterwards, if there are any callbacks ready for immediate invocation,
- * return true.
+ * Try to advance callbacks for all flavors of RCU on the current CPU, but
+ * only if it has been awhile since the last time we did so.  Afterwards,
+ * if there are any callbacks ready for immediate invocation, return true.
  */
 static bool rcu_try_advance_all_cbs(void)
 {
 	bool cbs_ready = false;
 	struct rcu_data *rdp;
+	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 	struct rcu_node *rnp;
 	struct rcu_state *rsp;
 
+	/* Exit early if we advanced recently. */
+	if (jiffies == rdtp->last_advance_all)
+		return 0;
+	rdtp->last_advance_all = jiffies;
+
 	for_each_rcu_flavor(rsp) {
 		rdp = this_cpu_ptr(rsp->rda);
 		rnp = rdp->mynode;
-- 
cgit v1.2.3-18-g5258

From c337f8f58ed7cf150651d232af8222421a71463d Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 5 Sep 2013 17:02:11 -0700
Subject: rcu: Throttle invoke_rcu_core() invocations due to non-lazy callbacks

If a non-lazy callback arrives on a CPU that has previously gone idle
with no non-lazy callbacks, invoke_rcu_core() forces the RCU core to
run.  However, it does not update the conditions that triggered this
forcing, which could result in several closely spaced invocations of
the RCU core, which in turn could result in an excessively high
context-switch rate and resulting high overhead.  This commit therefore
updates the ->all_lazy and ->nonlazy_posted_snap fields to prevent
closely spaced invocations.

Reported-by: Tibor Billes
Signed-off-by: Paul E. McKenney
Tested-by: Tibor Billes
Reviewed-by: Josh Triplett
---
 kernel/rcutree_plugin.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel')

diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index d81e3856fa9..2c15d7c1068 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1745,6 +1745,8 @@ static void rcu_prepare_for_idle(int cpu)
 	 */
 	if (rdtp->all_lazy &&
 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
+		rdtp->all_lazy = false;
+		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 		invoke_rcu_core();
 		return;
 	}
-- 
cgit v1.2.3-18-g5258

From cc6783f788d8fe8b23ec6fc2762f5e8c9a418eee Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 6 Sep 2013 17:39:49 -0700
Subject: rcu: Is it safe to enter an RCU read-side critical section?

There is currently no way for kernel code to determine whether it is
safe to enter an RCU read-side critical section, in other words, whether
or not RCU is paying attention to the currently running CPU.  Given the
large and increasing quantity of code shared by the idle loop and
non-idle code, this shortcoming is becoming increasingly painful.  This
commit therefore adds __rcu_is_watching(), which returns true if it is
safe to enter an RCU read-side critical section on the currently running
CPU.  This function is quite fast, using only a __this_cpu_read().
However, the caller must disable preemption.

Reported-by: Steven Rostedt
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 kernel/rcutiny.c |  4 ++--
 kernel/rcutree.c | 13 +++++++++++++
 2 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9ed6075dc56..b4bc61874d7 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -174,7 +174,7 @@ void rcu_irq_enter(void)
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
 
 /*
  * Test whether RCU thinks that the current CPU is idle.
@@ -185,7 +185,7 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
 /*
  * Test whether the current CPU was interrupted from idle.  Nested
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 32618b3fe4e..910d868808d 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -671,6 +671,19 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
+/**
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
-- 
cgit v1.2.3-18-g5258

From f9ffc31ebd38d2d74dbfe9f0b67274e99ad668f5 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Sun, 8 Sep 2013 11:51:06 -0700
Subject: rcu: Change EXPORT_SYMBOL() to EXPORT_SYMBOL_GPL()

Commit e6b80a3b (rcu: Detect illegal rcu dereference in extended
quiescent state) exported the pre-existing rcu_is_cpu_idle() function
using EXPORT_SYMBOL().  However, this is inconsistent with the remaining
exports from RCU, which are all EXPORT_SYMBOL_GPL().  The current state
of affairs means that a non-GPL module could use rcu_is_cpu_idle(), but
in a CONFIG_TREE_PREEMPT_RCU=y kernel would be unable to invoke
rcu_read_lock() and rcu_read_unlock().  This commit therefore makes
rcu_is_cpu_idle()'s export consistent with the rest of RCU, namely
EXPORT_SYMBOL_GPL().

Signed-off-by: Paul E. McKenney
Cc: Frederic Weisbecker
Reviewed-by: Josh Triplett
---
 kernel/rcutree.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 910d868808d..1b123e179d7 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -669,7 +669,7 @@ int rcu_is_cpu_idle(void)
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL_GPL(rcu_is_cpu_idle);
 
 /**
  * __rcu_is_watching - are RCU read-side critical sections safe?
-- 
cgit v1.2.3-18-g5258

From 5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 13 Sep 2013 17:20:11 -0700
Subject: rcu: Consistent rcu_is_watching() naming

The old rcu_is_cpu_idle() function is just the inverse of
__rcu_is_watching() with preemption disabled.  This commit therefore
renames rcu_is_cpu_idle() to rcu_is_watching(), inverting the sense of
the return value accordingly.

Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 kernel/lockdep.c  |  4 ++--
 kernel/rcupdate.c |  2 +-
 kernel/rcutiny.c  |  6 +++---
 kernel/rcutree.c  | 36 ++++++++++++++++++------------------
 4 files changed, 24 insertions(+), 24 deletions(-)

(limited to 'kernel')

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b9ee7..4e8e14c34e4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
-			: rcu_is_cpu_idle()
+			: !rcu_is_watching()
 				? "RCU used illegally from idle CPU!\n"
 				: "",
 	       rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index b02a339836b..3b3c0464d1e 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b4bc61874d7..0fa061dfa55 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -179,11 +179,11 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1b123e179d7..981d0c15a38 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -655,34 +655,34 @@ void rcu_nmi_exit(void)
 }
 
 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;
 
 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rcu_is_cpu_idle);
-
-/**
- * __rcu_is_watching - are RCU read-side critical sections safe?
- *
- * Return true if RCU is watching the running CPU, which means that
- * this CPU can safely enter RCU read-side critical sections.  Unlike
- * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
- * least disabled preemption.
- */
-bool __rcu_is_watching(void)
-{
-	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
-}
+EXPORT_SYMBOL_GPL(rcu_is_watching);
 
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
@@ -2268,7 +2268,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
-- 
cgit v1.2.3-18-g5258
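
Taken together, the last three patches give code that is shared between the
idle loop and ordinary kernel context a way to ask whether RCU read-side
critical sections are currently legal on this CPU.  The fragment below is an
illustrative sketch only, not part of the patches above; the helper name
shared_event_trace() is hypothetical, and it assumes the post-series API
(rcu_is_watching(), rcu_read_lock()/rcu_read_unlock()).

	#include <linux/rcupdate.h>

	/*
	 * Hypothetical helper shared by the idle loop and normal kernel
	 * context.  RCU read-side critical sections are legal only while
	 * RCU is watching this CPU, so skip the RCU-protected path when
	 * it is not, instead of provoking lockdep splats.
	 */
	static void shared_event_trace(void)
	{
		if (!rcu_is_watching())
			return;		/* Idle/EQS: RCU is ignoring this CPU. */

		rcu_read_lock();
		/* ... access RCU-protected data here ... */
		rcu_read_unlock();
	}

In paths that already run with preemption disabled, the lighter-weight
__rcu_is_watching() can be used directly, as the cc6783f788d8 changelog notes.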