author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2013-05-01 08:47:44 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2013-05-01 08:47:44 -0700
commit     bf61c8840efe60fd8f91446860b63338fb424158 (patch)
tree       7a71832407a4f0d6346db773343f4c3ae2257b19 /kernel/srcu.c
parent     5846115b30f3a881e542c8bfde59a699c1c13740 (diff)
parent     0c6a61657da78098472fd0eb71cc01f2387fa1bb (diff)
Merge branch 'next' into for-linus
Prepare first set of updates for 3.10 merge window.
Diffstat (limited to 'kernel/srcu.c')
-rw-r--r--  kernel/srcu.c  53
1 file changed, 27 insertions, 26 deletions
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 97c465ebd84..01d5ccb8bfe 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -16,8 +16,10 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  * Copyright (C) IBM Corporation, 2006
+ * Copyright (C) Fujitsu, 2012
  *
  * Author: Paul McKenney <paulmck@us.ibm.com>
+ *	   Lai Jiangshan <laijs@cn.fujitsu.com>
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  * 		Documentation/RCU/ *.txt
@@ -34,6 +36,10 @@
 #include <linux/delay.h>
 #include <linux/srcu.h>
 
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
 /*
  * Initialize an rcu_batch structure to empty.
  */
@@ -92,9 +98,6 @@ static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
 	}
 }
 
-/* single-thread state-machine */
-static void process_srcu(struct work_struct *work);
-
 static int init_srcu_struct_fields(struct srcu_struct *sp)
 {
 	sp->completed = 0;
@@ -279,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
  */
 void cleanup_srcu_struct(struct srcu_struct *sp)
 {
-	int sum;
-
-	sum = srcu_readers_active(sp);
-	WARN_ON(sum);  /* Leakage unless caller handles error. */
-	if (sum != 0)
-		return;
+	if (WARN_ON(srcu_readers_active(sp)))
+		return; /* Leakage unless caller handles error. */
 	free_percpu(sp->per_cpu_ref);
 	sp->per_cpu_ref = NULL;
 }
@@ -299,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
 	int idx;
 
+	idx = ACCESS_ONCE(sp->completed) & 0x1;
 	preempt_disable();
-	idx = rcu_dereference_index_check(sp->completed,
-					  rcu_read_lock_sched_held()) & 0x1;
 	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
 	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
@@ -318,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
-	preempt_disable();
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
-	ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
-	preempt_enable();
+	this_cpu_dec(sp->per_cpu_ref->c[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -420,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
 			   !lock_is_held(&rcu_sched_lock_map),
 			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
 
+	might_sleep();
 	init_completion(&rcu.completion);
 
 	head->next = NULL;
@@ -452,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
  * @sp: srcu_struct with which to synchronize.
  *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates.  Can block; must be called from
- * process context.
+ * Wait for the count to drain to zero of both indexes. To avoid the
+ * possible starvation of synchronize_srcu(), it waits for the count of
+ * the index=((->completed & 1) ^ 1) to drain to zero at first,
+ * and then flip the completed and wait for the count of the other index.
+ *
+ * Can block; must be called from process context.
  *
  * Note that it is illegal to call synchronize_srcu() from the corresponding
  * SRCU read-side critical section; doing so will result in deadlock.
@@ -464,7 +463,9 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
  */
 void synchronize_srcu(struct srcu_struct *sp)
 {
-	__synchronize_srcu(sp, SYNCHRONIZE_SRCU_TRYCOUNT);
+	__synchronize_srcu(sp, rcu_expedited
+			   ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
+			   : SYNCHRONIZE_SRCU_TRYCOUNT);
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu);
 
@@ -475,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
  * Wait for an SRCU grace period to elapse, but be more aggressive about
  * spinning rather than blocking when waiting.
  *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier.  It is also illegal to call
- * synchronize_srcu_expedited() from the corresponding SRCU read-side
- * critical section; doing so will result in deadlock.  However, it is
- * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
- * from some other srcu_struct's read-side critical section, as long as
+ * Note that it is also illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section;
+ * doing so will result in deadlock.  However, it is perfectly legal
+ * to call synchronize_srcu_expedited() on one srcu_struct from some
+ * other srcu_struct's read-side critical section, as long as
  * the resulting graph of srcu_structs is acyclic.
  */
 void synchronize_srcu_expedited(struct srcu_struct *sp)
@@ -637,7 +637,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
 /*
  * This is the work-queue function that handles SRCU grace periods.
  */
-static void process_srcu(struct work_struct *work)
+void process_srcu(struct work_struct *work)
 {
 	struct srcu_struct *sp;
 
@@ -648,3 +648,4 @@ static void process_srcu(struct work_struct *work)
 	srcu_invoke_callbacks(sp);
 	srcu_reschedule(sp);
 }
+EXPORT_SYMBOL_GPL(process_srcu);
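
The hunks above tighten SRCU's read-side fast paths and document the anti-starvation behavior of synchronize_srcu(). For context, here is a minimal, hypothetical usage sketch of the API these paths implement; it is not part of this commit, and the config structure, mutex, and function names are invented for illustration. Only the SRCU primitives themselves (init_srcu_struct(), srcu_read_lock(), srcu_read_unlock(), srcu_dereference(), synchronize_srcu(), cleanup_srcu_struct()) are real kernel API.

	#include <linux/module.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/srcu.h>

	struct config {				/* hypothetical payload */
		int value;
	};

	static struct config __rcu *cur_config;
	static struct srcu_struct config_srcu;
	static DEFINE_MUTEX(config_mutex);	/* serializes updaters */

	/*
	 * Reader: srcu_read_lock() returns the index that __srcu_read_lock()
	 * snapshotted from ->completed; it must be handed back to
	 * srcu_read_unlock() so this_cpu_dec() hits the matching counter.
	 */
	static int read_config_value(void)
	{
		struct config *c;
		int idx, val = -1;

		idx = srcu_read_lock(&config_srcu);
		c = srcu_dereference(cur_config, &config_srcu);
		if (c)
			val = c->value;
		srcu_read_unlock(&config_srcu, idx);
		return val;
	}

	/*
	 * Updater: publish a new version, wait for both counter indexes
	 * to drain (synchronize_srcu() may sleep, hence the new
	 * might_sleep() above), then free the old version.
	 */
	static int update_config_value(int value)
	{
		struct config *newc, *oldc;

		newc = kmalloc(sizeof(*newc), GFP_KERNEL);
		if (!newc)
			return -ENOMEM;
		newc->value = value;

		mutex_lock(&config_mutex);
		oldc = rcu_dereference_protected(cur_config,
						 lockdep_is_held(&config_mutex));
		rcu_assign_pointer(cur_config, newc);
		mutex_unlock(&config_mutex);

		synchronize_srcu(&config_srcu);	/* expedited if rcu_expedited is set */
		kfree(oldc);
		return 0;
	}

	static int __init config_example_init(void)
	{
		return init_srcu_struct(&config_srcu);
	}

	static void __exit config_example_exit(void)
	{
		kfree(rcu_dereference_protected(cur_config, 1));
		/* cleanup_srcu_struct() WARN_ON()s if readers leaked */
		cleanup_srcu_struct(&config_srcu);
	}

	module_init(config_example_init);
	module_exit(config_example_exit);
	MODULE_LICENSE("GPL");

Two points the sketch makes concrete: SRCU, like classic RCU, leaves update-side mutual exclusion to the caller (the hypothetical config_mutex), and after this commit the unlock fast path uses this_cpu_dec() directly, so it no longer needs the preempt_disable()/preempt_enable() pair that the removed lines carried.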