Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
| -rw-r--r-- | net/mac80211/mesh_pathtbl.c | 621 |
1 file changed, 442 insertions, 179 deletions
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0d2faacc3e8..cf032a8db9d 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <net/mac80211.h>
+#include "wme.h"
 #include "ieee80211_i.h"
 #include "mesh.h"
@@ -23,9 +24,12 @@
 /* Keep the mean chain length below this constant */
 #define MEAN_CHAIN_LEN 2
 
-#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
-                time_after(jiffies, mpath->exp_time) && \
-                !(mpath->flags & MESH_PATH_FIXED))
+static inline bool mpath_expired(struct mesh_path *mpath)
+{
+    return (mpath->flags & MESH_PATH_ACTIVE) &&
+           time_after(jiffies, mpath->exp_time) &&
+           !(mpath->flags & MESH_PATH_FIXED);
+}
 
 struct mpath_node {
     struct hlist_node list;
@@ -42,8 +46,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
 int mesh_paths_generation;
 
 /* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
+ * as readers. RCU provides sufficient protection only when reading the table
+ * (i.e. doing lookups). Adding or removing nodes requires that we take the
+ * read lock or we risk operating on an old table. The write lock is only
+ * needed when modifying the number of buckets in a table.
  */
 static DEFINE_RWLOCK(pathtbl_resize_lock);
@@ -66,9 +72,9 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
  * it's used twice. So it is illegal to do
  * for_each_mesh_entry(rcu_dereference(...), ...)
  */
-#define for_each_mesh_entry(tbl, p, node, i) \
+#define for_each_mesh_entry(tbl, node, i) \
     for (i = 0; i <= tbl->hash_mask; i++) \
-        hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+        hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
 
 
 static struct mesh_table *mesh_table_alloc(int size_order)
@@ -103,6 +109,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
            sizeof(newtbl->hash_rnd));
     for (i = 0; i <= newtbl->hash_mask; i++)
         spin_lock_init(&newtbl->hashwlock[i]);
+    spin_lock_init(&newtbl->gates_lock);
 
     return newtbl;
 }
@@ -118,6 +125,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
     struct hlist_head *mesh_hash;
     struct hlist_node *p, *q;
+    struct mpath_node *gate;
     int i;
 
     mesh_hash = tbl->hash_buckets;
@@ -129,6 +137,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
         }
         spin_unlock_bh(&tbl->hashwlock[i]);
     }
+    if (free_leafs) {
+        spin_lock_bh(&tbl->gates_lock);
+        hlist_for_each_entry_safe(gate, q,
+                      tbl->known_gates, list) {
+            hlist_del(&gate->list);
+            kfree(gate);
+        }
+        kfree(tbl->known_gates);
+        spin_unlock_bh(&tbl->gates_lock);
+    }
+
     __mesh_table_free(tbl);
 }
@@ -146,6 +165,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
     newtbl->free_node = oldtbl->free_node;
     newtbl->mean_chain_len = oldtbl->mean_chain_len;
     newtbl->copy_node = oldtbl->copy_node;
+    newtbl->known_gates = oldtbl->known_gates;
     atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
 
     oldhash = oldtbl->hash_buckets;
@@ -164,12 +184,12 @@ errcopy:
     return -ENOMEM;
 }
 
-static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
                struct mesh_table *tbl)
 {
     /* Use last four bytes of hw addr and interface index as hash index */
-    return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
-        & tbl->hash_mask;
+    return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
+                tbl->hash_rnd) & tbl->hash_mask;
 }
 
 
@@ -186,81 +206,146 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 {
     struct sk_buff *skb;
     struct ieee80211_hdr *hdr;
-    struct sk_buff_head tmpq;
     unsigned long flags;
 
     rcu_assign_pointer(mpath->next_hop, sta);
 
-    __skb_queue_head_init(&tmpq);
-
     spin_lock_irqsave(&mpath->frame_queue.lock, flags);
-
-    while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+    skb_queue_walk(&mpath->frame_queue, skb) {
         hdr = (struct ieee80211_hdr *) skb->data;
         memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
-        __skb_queue_tail(&tmpq, skb);
+        memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
+        ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
     }
 
-    skb_queue_splice(&tmpq, &mpath->frame_queue);
     spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+                 struct mesh_path *gate_mpath)
+{
+    struct ieee80211_hdr *hdr;
+    struct ieee80211s_hdr *mshdr;
+    int mesh_hdrlen, hdrlen;
+    char *next_hop;
+
+    hdr = (struct ieee80211_hdr *) skb->data;
+    hdrlen = ieee80211_hdrlen(hdr->frame_control);
+    mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+    if (!(mshdr->flags & MESH_FLAGS_AE)) {
+        /* size of the fixed part of the mesh header */
+        mesh_hdrlen = 6;
+
+        /* make room for the two extended addresses */
+        skb_push(skb, 2 * ETH_ALEN);
+        memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+        hdr = (struct ieee80211_hdr *) skb->data;
+
+        /* we preserve the previous mesh header and only add
+         * the new addresses */
+        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+        mshdr->flags = MESH_FLAGS_AE_A5_A6;
+        memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+        memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+    }
+
+    /* update next hop */
+    hdr = (struct ieee80211_hdr *) skb->data;
+    rcu_read_lock();
+    next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+    memcpy(hdr->addr1, next_hop, ETH_ALEN);
+    rcu_read_unlock();
+    memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
+    memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
 
 /**
- * mesh_path_lookup - look up a path in the mesh path table
- * @dst: hardware address (ETH_ALEN length) of destination
- * @sdata: local subif
  *
- * Returns: pointer to the mesh path structure, or NULL if not found
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
  *
- * Locking: must be called within a read rcu section.
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath. The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue. When false,
+ * move them.
  */
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+                    struct mesh_path *from_mpath,
+                    bool copy)
 {
-    struct mesh_path *mpath;
-    struct hlist_node *n;
-    struct hlist_head *bucket;
-    struct mesh_table *tbl;
-    struct mpath_node *node;
+    struct sk_buff *skb, *fskb, *tmp;
+    struct sk_buff_head failq;
+    unsigned long flags;
 
-    tbl = rcu_dereference(mesh_paths);
+    if (WARN_ON(gate_mpath == from_mpath))
+        return;
+    if (WARN_ON(!gate_mpath->next_hop))
+        return;
 
-    bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-    hlist_for_each_entry_rcu(node, n, bucket, list) {
-        mpath = node->mpath;
-        if (mpath->sdata == sdata &&
-                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
-            if (MPATH_EXPIRED(mpath)) {
-                spin_lock_bh(&mpath->state_lock);
-                if (MPATH_EXPIRED(mpath))
-                    mpath->flags &= ~MESH_PATH_ACTIVE;
-                spin_unlock_bh(&mpath->state_lock);
-            }
-            return mpath;
+    __skb_queue_head_init(&failq);
+
+    spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+    skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+    spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+    skb_queue_walk_safe(&failq, fskb, tmp) {
+        if (skb_queue_len(&gate_mpath->frame_queue) >=
+                  MESH_FRAME_QUEUE_LEN) {
+            mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
+            break;
         }
+
+        skb = skb_copy(fskb, GFP_ATOMIC);
+        if (WARN_ON(!skb))
+            break;
+
+        prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+        skb_queue_tail(&gate_mpath->frame_queue, skb);
+
+        if (copy)
+            continue;
+
+        __skb_unlink(fskb, &failq);
+        kfree_skb(fskb);
     }
-    return NULL;
+
+    mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
+          gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
+
+    if (!copy)
+        return;
+
+    spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+    skb_queue_splice(&failq, &from_mpath->frame_queue);
+    spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 }
 
-struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+
+static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
+                      struct ieee80211_sub_if_data *sdata)
 {
     struct mesh_path *mpath;
-    struct hlist_node *n;
     struct hlist_head *bucket;
-    struct mesh_table *tbl;
     struct mpath_node *node;
 
-    tbl = rcu_dereference(mpp_paths);
-
     bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-    hlist_for_each_entry_rcu(node, n, bucket, list) {
+    hlist_for_each_entry_rcu(node, bucket, list) {
         mpath = node->mpath;
         if (mpath->sdata == sdata &&
-                memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
-            if (MPATH_EXPIRED(mpath)) {
+            ether_addr_equal(dst, mpath->dst)) {
+            if (mpath_expired(mpath)) {
                 spin_lock_bh(&mpath->state_lock);
-                if (MPATH_EXPIRED(mpath))
-                    mpath->flags &= ~MESH_PATH_ACTIVE;
+                mpath->flags &= ~MESH_PATH_ACTIVE;
                 spin_unlock_bh(&mpath->state_lock);
             }
             return mpath;
@@ -269,6 +354,27 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
     return NULL;
 }
 
+/**
+ * mesh_path_lookup - look up a path in the mesh path table
+ * @sdata: local subif
+ * @dst: hardware address (ETH_ALEN length) of destination
+ *
+ * Returns: pointer to the mesh path structure, or NULL if not found
+ *
+ * Locking: must be called within a read rcu section.
+ */
+struct mesh_path *
+mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
+{
+    return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
+}
+
+struct mesh_path *
+mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
+{
+    return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
+}
+
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -279,22 +385,21 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
  *
  * Locking: must be called within a read rcu section.
  */
-struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
+struct mesh_path *
+mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
     struct mesh_table *tbl = rcu_dereference(mesh_paths);
     struct mpath_node *node;
-    struct hlist_node *p;
     int i;
     int j = 0;
 
-    for_each_mesh_entry(tbl, p, node, i) {
+    for_each_mesh_entry(tbl, node, i) {
         if (sdata && node->mpath->sdata != sdata)
             continue;
         if (j++ == idx) {
-            if (MPATH_EXPIRED(node->mpath)) {
+            if (mpath_expired(node->mpath)) {
                 spin_lock_bh(&node->mpath->state_lock);
-                if (MPATH_EXPIRED(node->mpath))
-                    node->mpath->flags &= ~MESH_PATH_ACTIVE;
+                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                 spin_unlock_bh(&node->mpath->state_lock);
             }
             return node->mpath;
@@ -305,15 +410,93 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
 }
 
 /**
+ * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+    struct mesh_table *tbl;
+    struct mpath_node *gate, *new_gate;
+    int err;
+
+    rcu_read_lock();
+    tbl = rcu_dereference(mesh_paths);
+
+    hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
+        if (gate->mpath == mpath) {
+            err = -EEXIST;
+            goto err_rcu;
+        }
+
+    new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+    if (!new_gate) {
+        err = -ENOMEM;
+        goto err_rcu;
+    }
+
+    mpath->is_gate = true;
+    mpath->sdata->u.mesh.num_gates++;
+    new_gate->mpath = mpath;
+    spin_lock_bh(&tbl->gates_lock);
+    hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+    spin_unlock_bh(&tbl->gates_lock);
+    mpath_dbg(mpath->sdata,
+          "Mesh path: Recorded new gate: %pM. %d known gates\n",
+          mpath->dst, mpath->sdata->u.mesh.num_gates);
+    err = 0;
+err_rcu:
+    rcu_read_unlock();
+    return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+    struct mpath_node *gate;
+    struct hlist_node *q;
+
+    hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
+        if (gate->mpath != mpath)
+            continue;
+        spin_lock_bh(&tbl->gates_lock);
+        hlist_del_rcu(&gate->list);
+        kfree_rcu(gate, rcu);
+        spin_unlock_bh(&tbl->gates_lock);
+        mpath->sdata->u.mesh.num_gates--;
+        mpath->is_gate = false;
+        mpath_dbg(mpath->sdata,
+              "Mesh path: Deleted gate: %pM. %d known gates\n",
+              mpath->dst, mpath->sdata->u.mesh.num_gates);
+        break;
+    }
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+    return sdata->u.mesh.num_gates;
+}
+
+/**
  * mesh_path_add - allocate and add a new path to the mesh path table
- * @addr: destination address of the path (ETH_ALEN length)
+ * @dst: destination address of the path (ETH_ALEN length)
  * @sdata: local subif
  *
  * Returns: 0 on success
  *
  * State: the initial state of the new path is set to 0
  */
-int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
+struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
+                const u8 *dst)
 {
     struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
     struct ieee80211_local *local = sdata->local;
@@ -321,20 +504,34 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
     struct mesh_path *mpath, *new_mpath;
     struct mpath_node *node, *new_node;
     struct hlist_head *bucket;
-    struct hlist_node *n;
     int grow = 0;
-    int err = 0;
+    int err;
     u32 hash_idx;
 
-    if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+    if (ether_addr_equal(dst, sdata->vif.addr))
         /* never add ourselves as neighbours */
-        return -ENOTSUPP;
+        return ERR_PTR(-ENOTSUPP);
 
     if (is_multicast_ether_addr(dst))
-        return -ENOTSUPP;
+        return ERR_PTR(-ENOTSUPP);
 
     if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
-        return -ENOSPC;
+        return ERR_PTR(-ENOSPC);
+
+    read_lock_bh(&pathtbl_resize_lock);
+    tbl = resize_dereference_mesh_paths();
+
+    hash_idx = mesh_table_hash(dst, sdata, tbl);
+    bucket = &tbl->hash_buckets[hash_idx];
+
+    spin_lock(&tbl->hashwlock[hash_idx]);
+
+    hlist_for_each_entry(node, bucket, list) {
+        mpath = node->mpath;
+        if (mpath->sdata == sdata &&
+            ether_addr_equal(dst, mpath->dst))
+            goto found;
+    }
 
     err = -ENOMEM;
     new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
@@ -345,8 +542,9 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
     if (!new_node)
         goto err_node_alloc;
 
-    read_lock_bh(&pathtbl_resize_lock);
     memcpy(new_mpath->dst, dst, ETH_ALEN);
+    eth_broadcast_addr(new_mpath->rann_snd_addr);
+    new_mpath->is_root = false;
     new_mpath->sdata = sdata;
     new_mpath->flags = 0;
     skb_queue_head_init(&new_mpath->frame_queue);
@@ -357,20 +555,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
     spin_lock_init(&new_mpath->state_lock);
     init_timer(&new_mpath->timer);
 
-    tbl = resize_dereference_mesh_paths();
-
-    hash_idx = mesh_table_hash(dst, sdata, tbl);
-    bucket = &tbl->hash_buckets[hash_idx];
-
-    spin_lock_bh(&tbl->hashwlock[hash_idx]);
-
-    err = -EEXIST;
-    hlist_for_each_entry(node, n, bucket, list) {
-        mpath = node->mpath;
-        if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
-            goto err_exists;
-    }
-
     hlist_add_head_rcu(&new_node->list, bucket);
     if (atomic_inc_return(&tbl->entries) >=
         tbl->mean_chain_len * (tbl->hash_mask + 1))
@@ -378,23 +562,23 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
     mesh_paths_generation++;
 
-    spin_unlock_bh(&tbl->hashwlock[hash_idx]);
-    read_unlock_bh(&pathtbl_resize_lock);
     if (grow) {
         set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
         ieee80211_queue_work(&local->hw, &sdata->work);
     }
-    return 0;
-
-err_exists:
-    spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+    mpath = new_mpath;
+found:
+    spin_unlock(&tbl->hashwlock[hash_idx]);
     read_unlock_bh(&pathtbl_resize_lock);
-    kfree(new_node);
+    return mpath;
+
 err_node_alloc:
     kfree(new_mpath);
 err_path_alloc:
     atomic_dec(&sdata->u.mesh.mpaths);
-    return err;
+    spin_unlock(&tbl->hashwlock[hash_idx]);
+    read_unlock_bh(&pathtbl_resize_lock);
+    return ERR_PTR(err);
 }
 
 static void mesh_table_free_rcu(struct rcu_head *rcu)
@@ -445,7 +629,8 @@ void mesh_mpp_table_grow(void)
     write_unlock_bh(&pathtbl_resize_lock);
 }
 
-int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
+int mpp_path_add(struct ieee80211_sub_if_data *sdata,
+         const u8 *dst, const u8 *mpp)
 {
     struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
     struct ieee80211_local *local = sdata->local;
@@ -453,12 +638,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
     struct mesh_path *mpath, *new_mpath;
     struct mpath_node *node, *new_node;
     struct hlist_head *bucket;
-    struct hlist_node *n;
     int grow = 0;
     int err = 0;
     u32 hash_idx;
 
-    if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+    if (ether_addr_equal(dst, sdata->vif.addr))
         /* never add ourselves as neighbours */
         return -ENOTSUPP;
 
@@ -481,6 +665,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
     new_mpath->flags = 0;
     skb_queue_head_init(&new_mpath->frame_queue);
     new_node->mpath = new_mpath;
+    init_timer(&new_mpath->timer);
     new_mpath->exp_time = jiffies;
     spin_lock_init(&new_mpath->state_lock);
 
@@ -489,12 +674,13 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
     hash_idx = mesh_table_hash(dst, sdata, tbl);
     bucket = &tbl->hash_buckets[hash_idx];
 
-    spin_lock_bh(&tbl->hashwlock[hash_idx]);
+    spin_lock(&tbl->hashwlock[hash_idx]);
 
     err = -EEXIST;
-    hlist_for_each_entry(node, n, bucket, list) {
+    hlist_for_each_entry(node, bucket, list) {
         mpath = node->mpath;
-        if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+        if (mpath->sdata == sdata &&
+            ether_addr_equal(dst, mpath->dst))
             goto err_exists;
     }
 
@@ -503,7 +689,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
         tbl->mean_chain_len * (tbl->hash_mask + 1))
         grow = 1;
 
-    spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+    spin_unlock(&tbl->hashwlock[hash_idx]);
     read_unlock_bh(&pathtbl_resize_lock);
     if (grow) {
         set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -512,7 +698,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
     return 0;
 
 err_exists:
-    spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+    spin_unlock(&tbl->hashwlock[hash_idx]);
     read_unlock_bh(&pathtbl_resize_lock);
     kfree(new_node);
 err_node_alloc:
@@ -536,35 +722,59 @@ void mesh_plink_broken(struct sta_info *sta)
     static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
     struct mesh_path *mpath;
     struct mpath_node *node;
-    struct hlist_node *p;
     struct ieee80211_sub_if_data *sdata = sta->sdata;
     int i;
 
     rcu_read_lock();
     tbl = rcu_dereference(mesh_paths);
-    for_each_mesh_entry(tbl, p, node, i) {
+    for_each_mesh_entry(tbl, node, i) {
         mpath = node->mpath;
-        spin_lock_bh(&mpath->state_lock);
         if (rcu_dereference(mpath->next_hop) == sta &&
             mpath->flags & MESH_PATH_ACTIVE &&
             !(mpath->flags & MESH_PATH_FIXED)) {
+            spin_lock_bh(&mpath->state_lock);
             mpath->flags &= ~MESH_PATH_ACTIVE;
             ++mpath->sn;
             spin_unlock_bh(&mpath->state_lock);
-            mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
-                    mpath->dst, cpu_to_le32(mpath->sn),
-                    cpu_to_le16(PERR_RCODE_DEST_UNREACH),
-                    bcast, sdata);
-        } else
-            spin_unlock_bh(&mpath->state_lock);
+            mesh_path_error_tx(sdata,
                sdata->u.mesh.mshcfg.element_ttl,
+                mpath->dst, mpath->sn,
+                WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
+        }
     }
     rcu_read_unlock();
 }
 
+static void mesh_path_node_reclaim(struct rcu_head *rp)
+{
+    struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+    struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
+    del_timer_sync(&node->mpath->timer);
+    atomic_dec(&sdata->u.mesh.mpaths);
+    kfree(node->mpath);
+    kfree(node);
+}
+
+/* needs to be called with the corresponding hashwlock taken */
+static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+{
+    struct mesh_path *mpath;
+    mpath = node->mpath;
+    spin_lock(&mpath->state_lock);
+    mpath->flags |= MESH_PATH_RESOLVING;
+    if (mpath->is_gate)
+        mesh_gate_del(tbl, mpath);
+    hlist_del_rcu(&node->list);
+    call_rcu(&node->rcu, mesh_path_node_reclaim);
+    spin_unlock(&mpath->state_lock);
+    atomic_dec(&tbl->entries);
+}
+
 /**
  * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
  *
- * @sta - mesh peer to match
+ * @sta: mesh peer to match
  *
  * RCU notes: this function is called when a mesh plink transitions from
  * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
@@ -577,46 +787,61 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
     struct mesh_table *tbl;
     struct mesh_path *mpath;
     struct mpath_node *node;
-    struct hlist_node *p;
     int i;
 
     rcu_read_lock();
-    tbl = rcu_dereference(mesh_paths);
-    for_each_mesh_entry(tbl, p, node, i) {
+    read_lock_bh(&pathtbl_resize_lock);
+    tbl = resize_dereference_mesh_paths();
+    for_each_mesh_entry(tbl, node, i) {
         mpath = node->mpath;
-        if (rcu_dereference(mpath->next_hop) == sta)
-            mesh_path_del(mpath->dst, mpath->sdata);
+        if (rcu_dereference(mpath->next_hop) == sta) {
+            spin_lock(&tbl->hashwlock[i]);
+            __mesh_path_del(tbl, node);
+            spin_unlock(&tbl->hashwlock[i]);
+        }
     }
+    read_unlock_bh(&pathtbl_resize_lock);
     rcu_read_unlock();
 }
 
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl,
+                 struct ieee80211_sub_if_data *sdata)
 {
-    struct mesh_table *tbl;
     struct mesh_path *mpath;
     struct mpath_node *node;
-    struct hlist_node *p;
     int i;
 
-    rcu_read_lock();
-    tbl = rcu_dereference(mesh_paths);
-    for_each_mesh_entry(tbl, p, node, i) {
+    WARN_ON(!rcu_read_lock_held());
+    for_each_mesh_entry(tbl, node, i) {
         mpath = node->mpath;
-        if (mpath->sdata == sdata)
-            mesh_path_del(mpath->dst, mpath->sdata);
+        if (mpath->sdata != sdata)
+            continue;
+        spin_lock_bh(&tbl->hashwlock[i]);
+        __mesh_path_del(tbl, node);
+        spin_unlock_bh(&tbl->hashwlock[i]);
     }
-    rcu_read_unlock();
 }
 
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+/**
+ * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
+ *
+ * This function deletes both mesh paths and mesh portal paths.
+ *
+ * @sdata: interface data to match
+ *
+ */
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-    struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-    struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+    struct mesh_table *tbl;
 
-    del_timer_sync(&node->mpath->timer);
-    atomic_dec(&sdata->u.mesh.mpaths);
-    kfree(node->mpath);
-    kfree(node);
+    rcu_read_lock();
+    read_lock_bh(&pathtbl_resize_lock);
+    tbl = resize_dereference_mesh_paths();
+    table_flush_by_iface(tbl, sdata);
+    tbl = resize_dereference_mpp_paths();
+    table_flush_by_iface(tbl, sdata);
+    read_unlock_bh(&pathtbl_resize_lock);
+    rcu_read_unlock();
 }
 
 /**
@@ -627,13 +852,12 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
  *
  * Returns: 0 if successful
  */
-int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
+int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 {
     struct mesh_table *tbl;
     struct mesh_path *mpath;
     struct mpath_node *node;
    struct hlist_head *bucket;
-    struct hlist_node *n;
     int hash_idx;
     int err = 0;
 
@@ -642,17 +866,12 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
     hash_idx = mesh_table_hash(addr, sdata, tbl);
     bucket = &tbl->hash_buckets[hash_idx];
 
-    spin_lock_bh(&tbl->hashwlock[hash_idx]);
-    hlist_for_each_entry(node, n, bucket, list) {
+    spin_lock(&tbl->hashwlock[hash_idx]);
+    hlist_for_each_entry(node, bucket, list) {
         mpath = node->mpath;
         if (mpath->sdata == sdata &&
-            memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
-            spin_lock_bh(&mpath->state_lock);
-            mpath->flags |= MESH_PATH_RESOLVING;
-            hlist_del_rcu(&node->list);
-            call_rcu(&node->rcu, mesh_path_node_reclaim);
-            atomic_dec(&tbl->entries);
-            spin_unlock_bh(&mpath->state_lock);
+            ether_addr_equal(addr, mpath->dst)) {
+            __mesh_path_del(tbl, node);
             goto enddel;
         }
     }
@@ -660,7 +879,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
     err = -ENXIO;
 enddel:
     mesh_paths_generation++;
-    spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+    spin_unlock(&tbl->hashwlock[hash_idx]);
     read_unlock_bh(&pathtbl_resize_lock);
     return err;
 }
@@ -681,38 +900,68 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 }
 
 /**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue. If there is more than one gate, the frames
+ * are copied from each gate to the next. After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+    struct ieee80211_sub_if_data *sdata = mpath->sdata;
+    struct mesh_table *tbl;
+    struct mesh_path *from_mpath = mpath;
+    struct mpath_node *gate = NULL;
+    bool copy = false;
+    struct hlist_head *known_gates;
+
+    rcu_read_lock();
+    tbl = rcu_dereference(mesh_paths);
+    known_gates = tbl->known_gates;
+    rcu_read_unlock();
+
+    if (!known_gates)
+        return -EHOSTUNREACH;
+
+    hlist_for_each_entry_rcu(gate, known_gates, list) {
+        if (gate->mpath->sdata != sdata)
+            continue;
+
+        if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+            mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
+            mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+            from_mpath = gate->mpath;
+            copy = true;
+        } else {
+            mpath_dbg(sdata,
+                  "Not forwarding %p (flags %#x)\n",
+                  gate->mpath, gate->mpath->flags);
+        }
+    }
+
+    hlist_for_each_entry_rcu(gate, known_gates, list)
+        if (gate->mpath->sdata == sdata) {
+            mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
+            mesh_path_tx_pending(gate->mpath);
+        }
+
+    return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
  * mesh_path_discard_frame - discard a frame whose path could not be resolved
  *
  * @skb: frame to discard
  * @sdata: network subif the frame was to be sent through
  *
- * If the frame was being forwarded from another MP, a PERR frame will be sent
- * to the precursor. The precursor's address (i.e. the previous hop) was saved
- * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
- * the destination is successfully resolved.
- *
  * Locking: the function must be called within a rcu_read_lock region
 */
-void mesh_path_discard_frame(struct sk_buff *skb,
-                 struct ieee80211_sub_if_data *sdata)
+void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
+                 struct sk_buff *skb)
 {
-    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-    struct mesh_path *mpath;
-    u32 sn = 0;
-
-    if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
-        u8 *ra, *da;
-
-        da = hdr->addr3;
-        ra = hdr->addr1;
-        mpath = mesh_path_lookup(da, sdata);
-        if (mpath)
-            sn = ++mpath->sn;
-        mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
-                   cpu_to_le32(sn),
-                   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
-    }
-
     kfree_skb(skb);
     sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
@@ -728,9 +977,8 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
 {
     struct sk_buff *skb;
 
-    while ((skb = skb_dequeue(&mpath->frame_queue)) &&
-            (mpath->flags & MESH_PATH_ACTIVE))
-        mesh_path_discard_frame(skb, mpath->sdata);
+    while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
+        mesh_path_discard_frame(mpath->sdata, skb);
 }
 
 /**
@@ -790,6 +1038,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
     struct mesh_table *tbl_path, *tbl_mpp;
+    int ret;
 
     tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
     if (!tbl_path)
@@ -797,21 +1046,40 @@ int mesh_pathtbl_init(void)
     tbl_path->free_node = &mesh_path_node_free;
     tbl_path->copy_node = &mesh_path_node_copy;
     tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+    tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+    if (!tbl_path->known_gates) {
+        ret = -ENOMEM;
+        goto free_path;
+    }
+    INIT_HLIST_HEAD(tbl_path->known_gates);
+
 
     tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
     if (!tbl_mpp) {
-        mesh_table_free(tbl_path, true);
-        return -ENOMEM;
+        ret = -ENOMEM;
+        goto free_path;
     }
     tbl_mpp->free_node = &mesh_path_node_free;
     tbl_mpp->copy_node = &mesh_path_node_copy;
     tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+    tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+    if (!tbl_mpp->known_gates) {
+        ret = -ENOMEM;
+        goto free_mpp;
+    }
+    INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
     /* Need no locking since this is during init */
     RCU_INIT_POINTER(mesh_paths, tbl_path);
     RCU_INIT_POINTER(mpp_paths, tbl_mpp);
 
     return 0;
+
+free_mpp:
+    mesh_table_free(tbl_mpp, true);
+free_path:
+    mesh_table_free(tbl_path, true);
+    return ret;
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -819,23 +1087,18 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
     struct mesh_table *tbl;
     struct mesh_path *mpath;
     struct mpath_node *node;
-    struct hlist_node *p;
     int i;
 
     rcu_read_lock();
     tbl = rcu_dereference(mesh_paths);
-    for_each_mesh_entry(tbl, p, node, i) {
+    for_each_mesh_entry(tbl, node, i) {
         if (node->mpath->sdata != sdata)
             continue;
         mpath = node->mpath;
-        spin_lock_bh(&mpath->state_lock);
         if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
             (!(mpath->flags & MESH_PATH_FIXED)) &&
-            time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
-            spin_unlock_bh(&mpath->state_lock);
-            mesh_path_del(mpath->dst, mpath->sdata);
-        } else
-            spin_unlock_bh(&mpath->state_lock);
+            time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
+            mesh_path_del(mpath->sdata, mpath->dst);
     }
     rcu_read_unlock();
 }
@@ -843,6 +1106,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 void mesh_pathtbl_unregister(void)
 {
     /* no need for locking during exit path */
-    mesh_table_free(rcu_dereference_raw(mesh_paths), true);
-    mesh_table_free(rcu_dereference_raw(mpp_paths), true);
+    mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
+    mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
 }
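A few notes on the changes above, with small standalone C sketches. None of the code below is kernel code; it only models the behavior the diff describes.

The new comment above pathtbl_resize_lock describes a three-level scheme: plain RCU is enough for lookups, the rwlock's read side serializes add/delete against a concurrent resize, and the write side is reserved for the grow operation itself. A sketch of that discipline using POSIX primitives in place of the kernel's rwlock and per-bucket spinlocks (RCU has no direct userspace equivalent here and is indicated only by comments; path_add and table_grow are illustrative names, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t resize_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t hashwlock[4] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void path_add(int bucket)
{
    pthread_rwlock_rdlock(&resize_lock);    /* table can't be swapped out */
    pthread_mutex_lock(&hashwlock[bucket]); /* bucket chain is ours */
    /* ... insert node with an RCU-safe publish (lookups need no lock) ... */
    pthread_mutex_unlock(&hashwlock[bucket]);
    pthread_rwlock_unlock(&resize_lock);
}

static void table_grow(void)
{
    pthread_rwlock_wrlock(&resize_lock);    /* exclude all add/delete */
    /* ... allocate larger table, rehash, publish the new pointer ... */
    pthread_rwlock_unlock(&resize_lock);
}

int main(void)
{
    path_add(2);
    table_grow();
    puts("ok");
    return 0;
}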
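Replacing the MPATH_EXPIRED() macro with the mpath_expired() inline buys type checking and single evaluation of the argument; the callers also drop the re-check of the flag after taking state_lock. The expiry test itself relies on the wraparound-safe jiffies comparison. A self-contained model (time_after is restated here from include/linux/jiffies.h minus its type-check plumbing; the flag values are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* wraparound-safe "a is after b", as in include/linux/jiffies.h */
#define time_after(a, b) ((long)((b) - (a)) < 0)

#define MESH_PATH_ACTIVE 0x1   /* illustrative bit values */
#define MESH_PATH_FIXED  0x8

struct mesh_path { unsigned long flags, exp_time; };

static unsigned long jiffies;  /* stand-in for the kernel tick counter */

static inline bool mpath_expired(struct mesh_path *mpath)
{
    return (mpath->flags & MESH_PATH_ACTIVE) &&
           time_after(jiffies, mpath->exp_time) &&
           !(mpath->flags & MESH_PATH_FIXED);
}

int main(void)
{
    /* expiry still works when the counter has wrapped past zero */
    struct mesh_path p = { .flags = MESH_PATH_ACTIVE,
                           .exp_time = (unsigned long)-5 };
    jiffies = 10;  /* 10 is "after" ULONG_MAX - 4 under the cast */
    printf("expired: %d\n", mpath_expired(&p));
    return 0;
}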
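Bucket selection hashes the last four bytes of the destination MAC together with the interface index, salted with the per-table hash_rnd, then masks down to the table size. A userspace sketch of the same bucketing scheme; mix_2words below is a deliberately simplified stand-in for the kernel's jhash_2words() (include/linux/jhash.h), and memcpy replaces the kernel's unaligned *(u32 *)(addr+2) load:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified 32-bit mixer standing in for jhash_2words() */
static uint32_t mix_2words(uint32_t a, uint32_t b, uint32_t initval)
{
    uint32_t h = a ^ 0x9e3779b9u;
    h = (h ^ (h >> 16)) * 0x45d9f3bu;
    h ^= b + initval;
    h = (h ^ (h >> 16)) * 0x45d9f3bu;
    return h ^ (h >> 16);
}

static uint32_t mesh_table_hash(const uint8_t *addr, int ifindex,
                                uint32_t hash_rnd, uint32_t hash_mask)
{
    uint32_t last4;

    /* last four bytes of the hardware address, as in the patch */
    memcpy(&last4, addr + 2, sizeof(last4));
    return mix_2words(last4, (uint32_t)ifindex, hash_rnd) & hash_mask;
}

int main(void)
{
    const uint8_t mac[6] = { 0x02, 0x00, 0xde, 0xad, 0xbe, 0xef };

    /* a table of 32 buckets has hash_mask = 31 */
    printf("bucket %u\n", mesh_table_hash(mac, 3, 0x12345678u, 31));
    return 0;
}

The per-table random salt (hash_rnd) means an attacker flooding path requests cannot deliberately pile entries into one chain.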
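prepare_for_gate() converts a frame to the extended mesh address format: if no Address Extension is present, it claims 12 bytes of headroom with skb_push(), slides the 802.11 header plus the 6-byte fixed mesh header toward the head, and writes eaddr1/eaddr2 into the gap with the AE flag set to A5/A6. The buffer arithmetic, modeled on a plain byte array (add_ae_field is a hypothetical helper; the hdrlen of 24 is simply a typical 3-address data header):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define MESH_FLAGS_AE_A5_A6 0x2  /* flag value per include/linux/ieee80211.h */

static unsigned char buf[256];

static void add_ae_field(unsigned char *frame, int hdrlen,
                         const unsigned char *eaddr1,
                         const unsigned char *eaddr2)
{
    int mesh_hdrlen = 6;  /* fixed part of the mesh header */

    /* skb_push(skb, 2 * ETH_ALEN) + memmove: shift the 802.11 header
     * and the fixed mesh header 12 bytes toward the head, opening a
     * gap exactly where the extended addresses belong */
    memmove(frame - 2 * ETH_ALEN, frame, hdrlen + mesh_hdrlen);
    frame -= 2 * ETH_ALEN;

    frame[hdrlen] = MESH_FLAGS_AE_A5_A6;  /* mshdr->flags */
    memcpy(frame + hdrlen + mesh_hdrlen, eaddr1, ETH_ALEN);
    memcpy(frame + hdrlen + mesh_hdrlen + ETH_ALEN, eaddr2, ETH_ALEN);
}

int main(void)
{
    unsigned char e1[ETH_ALEN] = {0}, e2[ETH_ALEN] = {0};
    unsigned char *frame = buf + 2 * ETH_ALEN;  /* leave headroom */

    memset(frame, 0xaa, 24 + 6);  /* fake 802.11 hdr (24) + mesh hdr (6) */
    add_ae_field(frame, 24, e1, e2);
    printf("flags now %#x\n", buf[24]);  /* header moved to buf[0] */
    return 0;
}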
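mesh_path_add() now hands back the path itself, folding the old -EEXIST case into a successful lookup via the found: label; real failures travel inside the pointer using the kernel's ERR_PTR encoding. A userspace restatement of that convention (fake_path_add is a hypothetical stand-in showing how a caller, such as the HWMP code, would consume the new return type; the real macros live in include/linux/err.h):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* the top 4095 values of the address space are reserved for errno
 * codes, so one pointer can carry either an object or an error */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct mesh_path { int dummy; };

/* hypothetical stand-in for the reworked mesh_path_add() */
static struct mesh_path *fake_path_add(int fail)
{
    if (fail)
        return ERR_PTR(-ENOSPC);
    return calloc(1, sizeof(struct mesh_path));
}

int main(void)
{
    struct mesh_path *mpath = fake_path_add(1);

    if (IS_ERR(mpath))  /* how callers check the combined return */
        printf("err = %ld\n", PTR_ERR(mpath));
    else
        printf("got path %p\n", (void *)mpath);
    return 0;
}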
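mesh_path_send_to_gates() drains a failed path's queue through every known gate: the first active gate receives the frames by a move, and each later gate receives a copy from the previous gate's queue, so all of them end up holding the pending traffic before mesh_path_tx_pending() fires. A toy model of just that move/copy cascade (plain ints in fixed arrays instead of sk_buffs):

#include <stdio.h>
#include <string.h>

#define QLEN 16

struct queue { int frames[QLEN]; int len; };

/* mirrors the observable effect of mesh_path_move_to_queue():
 * append 'from's frames to 'gate'; empty 'from' unless copying */
static void move_to_queue(struct queue *gate, struct queue *from, int copy)
{
    for (int i = 0; i < from->len && gate->len < QLEN; i++)
        gate->frames[gate->len++] = from->frames[i];
    if (!copy)
        from->len = 0;
}

int main(void)
{
    struct queue failed = { {10, 11, 12}, 3 };  /* unresolved mpath */
    struct queue gates[3];
    struct queue *from = &failed;
    int copy = 0;

    memset(gates, 0, sizeof(gates));

    /* first active gate: frames are moved; every further gate is fed
     * a copy from the previous gate's queue */
    for (int i = 0; i < 3; i++) {
        move_to_queue(&gates[i], from, copy);
        from = &gates[i];
        copy = 1;
    }

    for (int i = 0; i < 3; i++)
        printf("gate %d: %d frames\n", i, gates[i].len);
    printf("failed mpath: %d frames\n", failed.len);
    return 0;
}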
