From cc6783f788d8fe8b23ec6fc2762f5e8c9a418eee Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 6 Sep 2013 17:39:49 -0700
Subject: rcu: Is it safe to enter an RCU read-side critical section?

There is currently no way for kernel code to determine whether it is
safe to enter an RCU read-side critical section, in other words, whether
or not RCU is paying attention to the currently running CPU.  Given the
large and increasing quantity of code shared by the idle loop and
non-idle code, this shortcoming is becoming increasingly painful.

This commit therefore adds __rcu_is_watching(), which returns true if
it is safe to enter an RCU read-side critical section on the currently
running CPU.  This function is quite fast, using only a
__this_cpu_read().  However, the caller must disable preemption.

Reported-by: Steven Rostedt
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcupdate.h | 8 ++++----
 include/linux/rcutiny.h  | 9 +++++++++
 include/linux/rcutree.h  | 2 ++
 3 files changed, 15 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f1f1bc39346..a53a21a2808 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -261,6 +261,10 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 		rcu_irq_exit(); \
 	} while (0)

+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+extern int rcu_is_cpu_idle(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -297,10 +301,6 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
-#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
-
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
 bool rcu_lockdep_current_cpu_online(void);
 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e31005ee339..bee66596487 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,4 +132,13 @@ static inline void rcu_scheduler_starting(void)
 }
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

+#ifdef CONFIG_RCU_TRACE
+
+static inline bool __rcu_is_watching(void)
+{
+	return !rcu_is_cpu_idle();
+}
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 226169d1bd2..293613dfd2a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,4 +90,6 @@ extern void exit_rcu(void);

 extern void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
+extern bool __rcu_is_watching(void);
+
 #endif /* __LINUX_RCUTREE_H */
--
cgit v1.2.3-70-g09d2

From 5c173eb8bcb9c1aa888bd6d14a4cb746f3dd2420 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Fri, 13 Sep 2013 17:20:11 -0700
Subject: rcu: Consistent rcu_is_watching() naming

The old rcu_is_cpu_idle() function is just the negation of
__rcu_is_watching() with preemption disabled.  This commit therefore
renames rcu_is_cpu_idle() to rcu_is_watching(), inverting its sense so
that it returns true when RCU is watching the current CPU, and updates
all callers.

Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 include/linux/rcupdate.h | 18 +++++++++---------
 include/linux/rcutiny.h  | 16 ++++++++++++----
 include/linux/rcutree.h  |  2 +-
 kernel/lockdep.c         |  4 ++--
 kernel/rcupdate.c        |  2 +-
 kernel/rcutiny.c         |  6 +++---
 kernel/rcutree.c         | 36 ++++++++++++++++++------------------
 7 files changed, 46 insertions(+), 38 deletions(-)

(limited to 'include')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a53a21a2808..39cbb889e20 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 	} while (0)

 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
+extern bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

 /*
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)

 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock() used illegally while idle");
 }

@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
 }

@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_sched() used illegally while idle");
 }

@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index bee66596487..09ebcbe9fd7 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -132,13 +132,21 @@ static inline void rcu_scheduler_starting(void)
 }
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

-#ifdef CONFIG_RCU_TRACE
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

-static inline bool __rcu_is_watching(void)
+static inline bool rcu_is_watching(void)
 {
-	return !rcu_is_cpu_idle();
+	return __rcu_is_watching();
 }

-#endif /* #ifdef CONFIG_RCU_TRACE */
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+	return true;
+}
+
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 293613dfd2a..4b9c8154874 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -90,6 +90,6 @@ extern void exit_rcu(void);

 extern void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
-extern bool __rcu_is_watching(void);
+extern bool rcu_is_watching(void);

 #endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e16c45b9ee7..4e8e14c34e4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
-			: rcu_is_cpu_idle()
+			: !rcu_is_watching()
 				? "RCU used illegally from idle CPU!\n"
 				: "",
 	       rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");

 	lockdep_print_held_locks(curr);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index b02a339836b..3b3c0464d1e 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index b4bc61874d7..0fa061dfa55 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -179,11 +179,11 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);

 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1b123e179d7..981d0c15a38 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -655,34 +655,34 @@ void rcu_nmi_exit(void)
 }

 /**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
+ * __rcu_is_watching - are RCU read-side critical sections safe?
+ *
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  Unlike
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
+ * least disabled preemption.
+ */
+bool __rcu_is_watching(void)
+{
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
+}
+
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
  * or NMI handler, return true.
  */
-int rcu_is_cpu_idle(void)
+bool rcu_is_watching(void)
 {
 	int ret;

 	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+	ret = __rcu_is_watching();
 	preempt_enable();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rcu_is_cpu_idle);
-
-/**
- * __rcu_is_watching - are RCU read-side critical sections safe?
- *
- * Return true if RCU is watching the running CPU, which means that
- * this CPU can safely enter RCU read-side critical sections.  Unlike
- * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
- * least disabled preemption.
- */
-bool __rcu_is_watching(void)
-{
-	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
-}
+EXPORT_SYMBOL_GPL(rcu_is_watching);

 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

@@ -2268,7 +2268,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();

 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
--
cgit v1.2.3-70-g09d2
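
Editor's note on usage.  The first commit above describes __rcu_is_watching() as a fast
check, requiring only a __this_cpu_read(), for whether RCU read-side critical sections
are currently legal, with the caller responsible for disabling preemption.  The sketch
below is illustrative only and is not part of either patch: my_idle_probe() is a
hypothetical callback assumed to be invoked with preemption already disabled (for
example, from a tracepoint reached in the idle loop), on a configuration where
__rcu_is_watching() is declared (CONFIG_SMP, CONFIG_RCU_TRACE, or
CONFIG_DEBUG_LOCK_ALLOC).

	#include <linux/rcupdate.h>

	/*
	 * Illustrative sketch only -- not part of the patches above.
	 * my_idle_probe() is a hypothetical callback that may be reached
	 * from the idle loop.  It is assumed to run with preemption
	 * already disabled, which is what __rcu_is_watching() requires
	 * of its caller.
	 */
	static void my_idle_probe(void *ignore)
	{
		if (!__rcu_is_watching())
			return;	/* RCU is not watching this CPU; skip RCU work. */

		rcu_read_lock();
		/* ... dereference RCU-protected data here ... */
		rcu_read_unlock();
	}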
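
The second commit makes rcu_is_watching() the common-case interface: it wraps
__rcu_is_watching() in preempt_disable()/preempt_enable(), so callers need not manage
preemption themselves, and it returns true whenever RCU read-side critical sections are
legal on the current CPU.  The sketch below is again illustrative only; my_shared_helper()
is a hypothetical function standing in for code shared between the idle loop and ordinary
kernel context.

	#include <linux/rcupdate.h>

	/*
	 * Illustrative sketch only -- not part of the patches above.
	 * my_shared_helper() is a hypothetical function shared by idle
	 * and non-idle code paths.  rcu_is_watching() disables preemption
	 * internally, so it may be called from preemptible context.
	 */
	static void my_shared_helper(void)
	{
		if (!rcu_is_watching())
			return;	/* Called from an extended quiescent state. */

		rcu_read_lock();
		/* ... safely access RCU-protected data here ... */
		rcu_read_unlock();
	}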