author	Johannes Berg <johannes.berg@intel.com>	2011-05-14 11:00:52 +0200
committer	John W. Linville <linville@tuxdriver.com>	2011-05-16 14:25:29 -0400
commit	1928ecab620907a0953f811316d05f367f3f4dba (patch)
tree	c97ce24a7d86ad58c148ef8905c72c5705d12f4a /net
parent	d07c7cf49ae7c488e778c4d668f4cc10bd2fa971 (diff)
mac80211: fix and simplify mesh locking
The locking in mesh_{mpath,mpp}_table_grow not only has an rcu_read_unlock()
missing, it's also racy (though really only technically since it's invoked
from a single function only) since it obtains the new size of the table
without any locking, so two invocations of the function could attempt the
same resize.

Additionally, it uses synchronize_rcu() which is rather expensive and can be
avoided trivially here.

Modify the functions to only use the table lock and use call_rcu() instead
of synchronize_rcu().

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
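For context, the change replaces a blocking synchronize_rcu() with a deferred
call_rcu() free. A minimal sketch of that general pattern is shown below; the
names my_table, global_tbl, free_table and the replace_table_* helpers are
hypothetical stand-ins for illustration only, not the mac80211 identifiers
used in the diff that follows.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_table {
	/* ... table payload ... */
	struct rcu_head rcu_head;	/* lets the object be handed to call_rcu() */
};

static struct my_table __rcu *global_tbl;	/* hypothetical RCU-protected pointer */

static void free_table(struct my_table *tbl)
{
	kfree(tbl);	/* stand-in for the real teardown */
}

/* Old scheme: publish the new table, then block for a full grace period. */
static void replace_table_sync(struct my_table *oldtbl, struct my_table *newtbl)
{
	rcu_assign_pointer(global_tbl, newtbl);
	synchronize_rcu();	/* sleeps until all pre-existing readers finish */
	free_table(oldtbl);
}

/* New scheme: queue the free as an RCU callback and return immediately. */
static void table_free_rcu(struct rcu_head *head)
{
	struct my_table *tbl = container_of(head, struct my_table, rcu_head);

	free_table(tbl);
}

static void replace_table_deferred(struct my_table *oldtbl, struct my_table *newtbl)
{
	rcu_assign_pointer(global_tbl, newtbl);
	call_rcu(&oldtbl->rcu_head, table_free_rcu);	/* free runs after the grace period */
}

Because call_rcu() never sleeps, the patch below can queue the free while
pathtbl_resize_lock is still held (it is taken with write_lock_bh(), where
blocking is not allowed), and it also moves the size_order read under that
same lock, closing the theoretical double-resize race described above.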
Diffstat (limited to 'net')
-rw-r--r--	net/mac80211/mesh.h	3
-rw-r--r--	net/mac80211/mesh_pathtbl.c	44
2 files changed, 25 insertions, 22 deletions
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e7c5fddb480..eb733c0d61a 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -120,6 +120,7 @@ struct mesh_path {
  *	buckets
  * @mean_chain_len: maximum average length for the hash buckets' list, if it is
  *	reached, the table will grow
+ * @rcu_head: RCU head to free the table
*/
struct mesh_table {
 	/* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
 	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
 	int size_order;
 	int mean_chain_len;
+
+	struct rcu_head rcu_head;
};
/* Recent multicast cache */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index f775202552e..74021365b8c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -370,52 +370,52 @@ err_path_alloc:
 	return err;
}
+static void mesh_table_free_rcu(struct rcu_head *rcu)
+{
+	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
+
+	mesh_table_free(tbl, false);
+}
+
void mesh_mpath_table_grow(void)
{
 	struct mesh_table *oldtbl, *newtbl;
-	rcu_read_lock();
-	newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
-	if (!newtbl)
-		return;
 	write_lock_bh(&pathtbl_resize_lock);
+	newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
+	if (!newtbl)
+		goto out;
 	oldtbl = mesh_paths;
 	if (mesh_table_grow(mesh_paths, newtbl) < 0) {
-		rcu_read_unlock();
 		__mesh_table_free(newtbl);
-		write_unlock_bh(&pathtbl_resize_lock);
-		return;
+		goto out;
 	}
-	rcu_read_unlock();
 	rcu_assign_pointer(mesh_paths, newtbl);
-	write_unlock_bh(&pathtbl_resize_lock);
-	synchronize_rcu();
-	mesh_table_free(oldtbl, false);
+	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+
+ out:
+	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
 	struct mesh_table *oldtbl, *newtbl;
-	rcu_read_lock();
-	newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
-	if (!newtbl)
-		return;
 	write_lock_bh(&pathtbl_resize_lock);
+	newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
+	if (!newtbl)
+		goto out;
 	oldtbl = mpp_paths;
 	if (mesh_table_grow(mpp_paths, newtbl) < 0) {
-		rcu_read_unlock();
 		__mesh_table_free(newtbl);
-		write_unlock_bh(&pathtbl_resize_lock);
-		return;
+		goto out;
 	}
-	rcu_read_unlock();
 	rcu_assign_pointer(mpp_paths, newtbl);
-	write_unlock_bh(&pathtbl_resize_lock);
+	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
-	synchronize_rcu();
-	mesh_table_free(oldtbl, false);
+ out:
+	write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)