Diffstat (limited to 'Documentation/kref.txt')
 Documentation/kref.txt | 113
 1 file changed, 100 insertions(+), 13 deletions(-)
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index 42fe2844591..ddf85a5dde0 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -67,7 +67,7 @@ void more_data_handling(void *cb_data)
.
. do stuff with data here
.
- kref_put(data, data_release);
+ kref_put(&data->refcount, data_release);
}
int my_data_handler(void)
@@ -84,7 +84,6 @@ int my_data_handler(void)
task = kthread_run(more_data_handling, data, "more_data_handling");
if (task == ERR_PTR(-ENOMEM)) {
rv = -ENOMEM;
- kref_put(&data->refcount, data_release);
goto out;
}
@@ -141,10 +140,10 @@ The last rule (rule 3) is the nastiest one to handle. Say, for
instance, you have a list of items that are each kref-ed, and you wish
to get the first one. You can't just pull the first item off the list
and kref_get() it. That violates rule 3 because you are not already
-holding a valid pointer. You must add locks or semaphores. For
-instance:
+holding a valid pointer. You must add a mutex (or some other lock).
+For instance:
-static DECLARE_MUTEX(sem);
+static DEFINE_MUTEX(mutex);
static LIST_HEAD(q);
struct my_data
{
@@ -155,12 +154,12 @@ struct my_data
static struct my_data *get_entry()
{
struct my_data *entry = NULL;
- down(&sem);
+ mutex_lock(&mutex);
if (!list_empty(&q)) {
- entry = container_of(q.next, struct my_q_entry, link);
+ entry = container_of(q.next, struct my_data, link);
kref_get(&entry->refcount);
}
- up(&sem);
+ mutex_unlock(&mutex);
return entry;
}
@@ -174,9 +173,9 @@ static void release_entry(struct kref *ref)
static void put_entry(struct my_data *entry)
{
- down(&sem);
+ mutex_lock(&mutex);
kref_put(&entry->refcount, release_entry);
- up(&sem);
+ mutex_unlock(&mutex);
}
The kref_put() return value is useful if you do not want to hold the
@@ -191,13 +190,13 @@ static void release_entry(struct kref *ref)
static void put_entry(struct my_data *entry)
{
- down(&sem);
+ mutex_lock(&mutex);
if (kref_put(&entry->refcount, release_entry)) {
list_del(&entry->link);
- up(&sem);
+ mutex_unlock(&mutex);
kfree(entry);
} else
- up(&sem);
+ mutex_unlock(&mutex);
}
This is really more useful if you have to call other routines as part
@@ -214,3 +213,91 @@ presentation on krefs, which can be found at:
and:
http://www.kroah.com/linux/talks/ols_2004_kref_talk/
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+ struct my_data *entry = NULL;
+ mutex_lock(&mutex);
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ mutex_unlock(&mutex);
+ return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del(&entry->link);
+ mutex_unlock(&mutex);
+ kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry);
+}
+
+This makes it possible to drop the mutex around kref_put() in put_entry(),
+but it is important that kref_get_unless_zero() is called inside the same
+critical section that finds the entry in the lookup table; otherwise
+kref_get_unless_zero() may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero() without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
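+
+For example (the helper name below is only illustrative), a caller that
+already holds a counted reference knows the refcount cannot be zero, so it
+can take an additional reference with plain kref_get():
+
+static struct my_data *grab_entry_again(struct my_data *entry)
+{
+ /* The caller already holds a reference, so the count cannot be zero. */
+ kref_get(&entry->refcount);
+ return entry;
+}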
+
+kref_get_unless_zero() also makes it possible to use RCU
+locking for lookups in the above example:
+
+struct my_data
+{
+ struct rcu_head rhead;
+ .
+ struct kref refcount;
+ .
+ .
+};
+
+static struct my_data *get_entry_rcu()
+{
+ struct my_data *entry = NULL;
+ rcu_read_lock();
+ if (!list_empty(&q)) {
+ entry = container_of(q.next, struct my_data, link);
+ if (!kref_get_unless_zero(&entry->refcount))
+ entry = NULL;
+ }
+ rcu_read_unlock();
+ return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del_rcu(&entry->link);
+ mutex_unlock(&mutex);
+ kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+ kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for an
+RCU grace period after release_entry_rcu() has been called. That can be
+accomplished by using kfree_rcu(entry, rhead) as done above, or by calling
+synchronize_rcu() before using kfree(), but note that synchronize_rcu() may
+sleep for a substantial amount of time.
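+
+As a sketch of the synchronize_rcu() alternative (the function name is only
+illustrative), the release function could instead be written as:
+
+static void release_entry_rcu_sync(struct kref *ref)
+{
+ struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+ mutex_lock(&mutex);
+ list_del_rcu(&entry->link);
+ mutex_unlock(&mutex);
+ /* Wait for rcu_read_lock() readers that might still see the entry. */
+ synchronize_rcu();
+ kfree(entry);
+}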
+
+
+Thomas Hellstrom <thellstrom@vmware.com>