path: root/lib/percpu-refcount.c
author		Kent Overstreet <koverstreet@google.com>	2013-05-31 15:26:45 -0700
committer	Tejun Heo <tj@kernel.org>	2013-06-03 15:36:41 -0700
commit		215e262f2aeba378aa192da07c30770f9925a4bf (patch)
tree		c854461e40f3ce9dde45f7128679b20a362643f8 /lib/percpu-refcount.c
parent		042dd60ca6dec9a02cefa8edd67de386e35755d6 (diff)
percpu: implement generic percpu refcounting
This implements a refcount with similar semantics to
atomic_get()/atomic_dec_and_test() - but percpu.

It also implements two stage shutdown, as we need it to tear down the
percpu counts.  Before dropping the initial refcount, you must call
percpu_ref_kill(); this puts the refcount in "shutting down mode" and
switches back to a single atomic refcount with the appropriate
barriers (synchronize_rcu()).

It's also legal to call percpu_ref_kill() multiple times - it only
returns true once, so callers don't have to reimplement shutdown
synchronization.

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: coding-style tweak]
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Tejun Heo <tj@kernel.org>
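
Before the diff, a minimal usage sketch of the API described above. This is an
illustration only: struct my_object and the my_object_*() helpers are
hypothetical names invented for this example, while percpu_ref_get() and
percpu_ref_put() are the inline helpers from include/linux/percpu-refcount.h
introduced by the same patch.

	#include <linux/percpu-refcount.h>
	#include <linux/slab.h>

	struct my_object {
		struct percpu_ref	ref;
		/* ... object fields ... */
	};

	/* Runs once the final reference is dropped; must not sleep. */
	static void my_object_release(struct percpu_ref *ref)
	{
		struct my_object *obj = container_of(ref, struct my_object, ref);

		kfree(obj);
	}

	static struct my_object *my_object_create(void)
	{
		struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;

		/* The object starts with a refcount of 1 - the "initial ref". */
		if (percpu_ref_init(&obj->ref, my_object_release)) {
			kfree(obj);
			return NULL;
		}
		return obj;
	}

	/*
	 * Fast-path users just take and drop percpu references:
	 *
	 *	percpu_ref_get(&obj->ref);
	 *	...
	 *	percpu_ref_put(&obj->ref);
	 */

	static void my_object_shutdown(struct my_object *obj)
	{
		/*
		 * Two stage shutdown: mark the ref dead, collapse it back to a
		 * single atomic_t and drop the initial ref. my_object_release()
		 * runs once every outstanding get has been put.
		 */
		percpu_ref_kill(&obj->ref);
	}
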
Diffstat (limited to 'lib/percpu-refcount.c')
-rw-r--r--	lib/percpu-refcount.c	128
1 file changed, 128 insertions, 0 deletions
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
new file mode 100644
index 00000000000..6f0ffd702a0
--- /dev/null
+++ b/lib/percpu-refcount.c
@@ -0,0 +1,128 @@
+#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+
+#include <linux/kernel.h>
+#include <linux/percpu-refcount.h>
+
+/*
+ * Initially, a percpu refcount is just a set of percpu counters; we don't try
+ * to detect the ref hitting 0, which means that get/put can just increment or
+ * decrement the local counter. Note that the counter on a particular cpu can
+ * (and will) wrap - this is fine, because when we go to shut down, the percpu
+ * counters will all sum to the correct value.
+ *
+ * (More precisely: because modular arithmetic is commutative, the sum of all
+ * the pcpu_count vars will be equal to what it would have been if all the gets
+ * and puts were done to a single integer, even if some of the percpu integers
+ * overflow or underflow).
+ *
+ * The real trick to implementing percpu refcounts is shutdown. We can't detect
+ * the ref hitting 0 on every put - this would require global synchronization
+ * and defeat the whole purpose of using percpu refs.
+ *
+ * What we do is require the user to keep track of the initial refcount; we
+ * know the ref can't hit 0 before the user drops the initial ref, so as long
+ * as we convert to non-percpu mode before the initial ref is dropped,
+ * everything works.
+ *
+ * Converting to non-percpu mode is done by percpu_ref_kill(), which uses
+ * call_rcu() to make sure all in-flight gets/puts are finished with the percpu
+ * counters before they are summed. Additionally, we need a bias value so that
+ * the atomic_t can't hit 0 before we've added up all the percpu refs.
+ */
+
+#define PCPU_COUNT_BIAS (1U << 31)
+
+/**
+ * percpu_ref_init - initialize a percpu refcount
+ * @ref: ref to initialize
+ * @release: function which will be called when refcount hits 0
+ *
+ * Initializes the refcount to 1 (the initial ref), starting in percpu mode;
+ * analogous to atomic_set(ref, 1).
+ *
+ * Note that @release must not sleep - it may potentially be called from RCU
+ * callback context by percpu_ref_kill().
+ */
+int percpu_ref_init(struct percpu_ref *ref, percpu_ref_release *release)
+{
+ atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+ ref->pcpu_count = alloc_percpu(unsigned);
+ if (!ref->pcpu_count)
+ return -ENOMEM;
+
+ ref->release = release;
+ return 0;
+}
+
+static void percpu_ref_kill_rcu(struct rcu_head *rcu)
+{
+ struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+ unsigned __percpu *pcpu_count;
+ unsigned count = 0;
+ int cpu;
+
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+ /* Mask out PCPU_REF_DEAD */
+ pcpu_count = (unsigned __percpu *)
+ (((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
+
+ for_each_possible_cpu(cpu)
+ count += *per_cpu_ptr(pcpu_count, cpu);
+
+ free_percpu(pcpu_count);
+
+ pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
+
+ /*
+ * It's crucial that we sum the percpu counters _before_ adding the sum
+ * to &ref->count; since gets could be happening on one cpu while puts
+ * happen on another, adding a single cpu's count could cause
+ * @ref->count to hit 0 before we've got a consistent value - but the
+ * sum of all the counts will be consistent and correct.
+ *
+ * Subtracting the bias value then has to happen _after_ adding count to
+ * &ref->count; we need the bias value to prevent &ref->count from
+ * reaching 0 before we add the percpu counts. But doing it at the same
+ * time is equivalent and saves us atomic operations:
+ */
+
+ atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);
+
+ /*
+ * Now we're in single atomic_t mode with a consistent refcount, so it's
+ * safe to drop our initial ref:
+ */
+ percpu_ref_put(ref);
+}
+
+/**
+ * percpu_ref_kill - safely drop initial ref
+ * @ref: percpu_ref to kill
+ *
+ * Must be used to drop the initial ref on a percpu refcount; must be called
+ * precisely once before shutdown.
+ *
+ * Puts @ref in non-percpu mode, then does a call_rcu() before gathering up the
+ * percpu counters and dropping the initial ref.
+ */
+void percpu_ref_kill(struct percpu_ref *ref)
+{
+ unsigned __percpu *pcpu_count, *old, *new;
+
+ pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+ do {
+ if (REF_STATUS(pcpu_count) == PCPU_REF_DEAD) {
+ WARN(1, "percpu_ref_kill() called more than once!\n");
+ return;
+ }
+
+ old = pcpu_count;
+ new = (unsigned __percpu *)
+ (((unsigned long) pcpu_count)|PCPU_REF_DEAD);
+
+ pcpu_count = cmpxchg(&ref->pcpu_count, old, new);
+ } while (pcpu_count != old);
+
+ call_rcu(&ref->rcu, percpu_ref_kill_rcu);
+}
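
To make the wrap-around and bias reasoning in the comments above concrete, here
is a small userspace model of the arithmetic. It is a sketch for illustration
only: plain unsigned variables stand in for the real percpu counters and the
atomic_t, NCPUS and the get/put pattern are made up, and all concurrency is
ignored.

	#include <assert.h>

	#define NCPUS	2
	#define BIAS	(1U << 31)	/* mirrors PCPU_COUNT_BIAS */

	int main(void)
	{
		unsigned pcpu[NCPUS] = { 0, 0 };
		unsigned atomic = 1 + BIAS;	/* initial ref plus the bias */
		unsigned sum = 0;
		int cpu;

		/*
		 * Three gets happen on cpu 0, the matching puts on cpu 1; cpu 1's
		 * counter wraps below zero, but the sum mod 2^32 is still 0.
		 */
		pcpu[0] += 3;			/* 3 */
		pcpu[1] -= 3;			/* UINT_MAX - 2 */

		/*
		 * What percpu_ref_kill_rcu() does: sum the counters first, then
		 * fold the sum in and remove the bias in a single addition.
		 */
		for (cpu = 0; cpu < NCPUS; cpu++)
			sum += pcpu[cpu];

		atomic += sum - BIAS;

		assert(sum == 0);	/* every get was matched by a put */
		assert(atomic == 1);	/* only the initial ref remains */

		/*
		 * In the kernel, percpu_ref_kill_rcu() now drops that last
		 * initial ref, which is what finally invokes ->release().
		 */
		return 0;
	}
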