Diffstat (limited to 'drivers/net/wireless/ti/wlcore/tx.c')
 drivers/net/wireless/ti/wlcore/tx.c | 679 ++++++++++++++++++++++++++--------
 1 file changed, 476 insertions(+), 203 deletions(-)
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 6893bc20799..40b43115f83 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/etherdevice.h>
+#include <linux/spinlock.h>
 
 #include "wlcore.h"
 #include "debug.h"
@@ -72,7 +73,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
 	return id;
 }
 
-static void wl1271_free_tx_id(struct wl1271 *wl, int id)
+void wl1271_free_tx_id(struct wl1271 *wl, int id)
 {
 	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
 		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
@@ -82,28 +83,44 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
 		wl->tx_frames_cnt--;
 	}
 }
+EXPORT_SYMBOL(wl1271_free_tx_id);
 
 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+						 struct wl12xx_vif *wlvif,
 						 struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
 
+	hdr = (struct ieee80211_hdr *)(skb->data +
+				       sizeof(struct wl1271_tx_hw_descr));
+	if (!ieee80211_is_auth(hdr->frame_control))
+		return;
+
 	/*
 	 * add the station to the known list before transmitting the
 	 * authentication response. this way it won't get de-authed by FW
 	 * when transmitting too soon.
 	 */
-	hdr = (struct ieee80211_hdr *)(skb->data +
-				       sizeof(struct wl1271_tx_hw_descr));
-	if (ieee80211_is_auth(hdr->frame_control))
-		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
+
+	/*
+	 * ROC for 1 second on the AP channel for completing the connection.
+	 * Note the ROC will be continued by the update_sta_state callbacks
+	 * once the station reaches the associated state.
+	 */
+	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+	wlvif->pending_auth_reply_time = jiffies;
+	cancel_delayed_work(&wlvif->pending_auth_complete_work);
+	ieee80211_queue_delayed_work(wl->hw,
+				&wlvif->pending_auth_complete_work,
+				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
 }
 
 static void wl1271_tx_regulate_link(struct wl1271 *wl,
 				    struct wl12xx_vif *wlvif,
 				    u8 hlid)
 {
-	bool fw_ps, single_sta;
+	bool fw_ps;
 	u8 tx_pkts;
 
 	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
@@ -111,15 +128,19 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
 
 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
 	tx_pkts = wl->links[hlid].allocated_pkts;
-	single_sta = (wl->active_sta_count == 1);
 
 	/*
 	 * if in FW PS and there is enough data in FW we can put the link
 	 * into high-level PS and clean out its TX queues.
-	 * Make an exception if this is the only connected station. In this
-	 * case FW-memory congestion is not a problem.
+	 * Make an exception if this is the only connected link. In this
+	 * case FW-memory congestion is less of a problem.
+	 * Note that a single connected STA means 2*ap_count + 1 active links,
+	 * since we must account for the global and broadcast AP links
+	 * for each AP. The "fw_ps" check assures us the other link is a STA
+	 * connected to the AP. Otherwise the FW would not set the PSM bit.
 	 */
-	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
+	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
+	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
@@ -127,17 +148,15 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
 {
 	return wl->dummy_packet == skb;
 }
+EXPORT_SYMBOL(wl12xx_is_dummy_packet);
 
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-			 struct sk_buff *skb)
+static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
-
-	if (control->control.sta) {
+	if (sta) {
 		struct wl1271_station *wl_sta;
 
-		wl_sta = (struct wl1271_station *)
-				control->control.sta->drv_priv;
+		wl_sta = (struct wl1271_station *)sta->drv_priv;
 		return wl_sta->hlid;
 	} else {
 		struct ieee80211_hdr *hdr;
@@ -146,67 +165,61 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			return wl->system_hlid;
 
 		hdr = (struct ieee80211_hdr *)skb->data;
-		if (ieee80211_is_mgmt(hdr->frame_control))
-			return wlvif->ap.global_hlid;
-		else
+		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
 			return wlvif->ap.bcast_hlid;
+		else
+			return wlvif->ap.global_hlid;
 	}
 }
 
 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-		      struct sk_buff *skb)
+		      struct sk_buff *skb, struct ieee80211_sta *sta)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-
-	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
-		return wl->system_hlid;
+	struct ieee80211_tx_info *control;
 
 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
-		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
+		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
 
-	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
-	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
-	    !ieee80211_is_auth(hdr->frame_control) &&
-	    !ieee80211_is_assoc_req(hdr->frame_control))
-		return wlvif->sta.hlid;
-	else
+	control = IEEE80211_SKB_CB(skb);
+	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+		wl1271_debug(DEBUG_TX, "tx offchannel");
 		return wlvif->dev_hlid;
+	}
+
+	return wlvif->sta.hlid;
 }
 
 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
 					  unsigned int packet_length)
 {
-	if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
-		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
-	else
+	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
+	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+	else
+		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
 }
 EXPORT_SYMBOL(wlcore_calc_packet_alignment);
 
 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			      struct sk_buff *skb, u32 extra, u32 buf_offset,
-			      u8 hlid)
+			      u8 hlid, bool is_gem)
 {
 	struct wl1271_tx_hw_descr *desc;
 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
 	u32 total_blocks;
 	int id, ret = -EBUSY, ac;
-	u32 spare_blocks = wl->normal_tx_spare;
-	bool is_dummy = false;
+	u32 spare_blocks;
 
-	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
+	if (buf_offset + total_len > wl->aggr_buf_size)
 		return -EAGAIN;
 
+	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
+
 	/* allocate free identifier for the packet */
 	id = wl1271_alloc_tx_id(wl, skb);
 	if (id < 0)
 		return id;
 
-	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
-		is_dummy = true;
-	else if (wlvif->is_gem)
-		spare_blocks = wl->gem_tx_spare;
-
 	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
 
 	if (total_blocks <= wl->tx_blocks_available) {
@@ -221,16 +234,19 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		wl->tx_blocks_available -= total_blocks;
 		wl->tx_allocated_blocks += total_blocks;
 
-		/* If the FW was empty before, arm the Tx watchdog */
-		if (wl->tx_allocated_blocks == total_blocks)
+		/*
+		 * If the FW was empty before, arm the Tx watchdog. Also do
+		 * this on the first Tx after resume, as we always cancel the
+		 * watchdog on suspend.
+		 */
+		if (wl->tx_allocated_blocks == total_blocks ||
+		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
 			wl12xx_rearm_tx_watchdog_locked(wl);
 
 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		wl->tx_allocated_pkts[ac]++;
 
-		if (!is_dummy && wlvif &&
-		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
-		    test_bit(hlid, wlvif->ap.sta_hlid_map))
+		if (test_bit(hlid, wl->links_map))
 			wl->links[hlid].allocated_pkts++;
 
 		ret = 0;
@@ -268,6 +284,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	if (extra) {
 		int hdrlen = ieee80211_hdrlen(frame_control);
 		memmove(frame_start, hdr, hdrlen);
+		skb_set_network_header(skb, skb_network_offset(skb) + extra);
 	}
 
 	/* configure packet life time */
@@ -296,28 +313,43 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
 	} else if (wlvif) {
+		u8 session_id = wl->session_ids[hlid];
+
+		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
+		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
+			session_id = 0;
+
 		/* configure the tx attributes */
-		tx_attr = wlvif->session_counter <<
-			  TX_HW_ATTR_OFST_SESSION_COUNTER;
+		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
 	}
 
 	desc->hlid = hlid;
 	if (is_dummy || !wlvif)
 		rate_idx = 0;
 	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
-		/* if the packets are destined for AP (have a STA entry)
-		   send them with AP rate policies, otherwise use default
-		   basic rates */
-		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+		/*
+		 * if the packets are data packets
+		 * send them with AP rate policies (EAPOLs are an exception),
+		 * otherwise use default basic rates
+		 */
+		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+			rate_idx = wlvif->sta.basic_rate_idx;
+		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
 			rate_idx = wlvif->sta.p2p_rate_idx;
-		else if (control->control.sta)
+		else if (ieee80211_is_data(frame_control))
 			rate_idx = wlvif->sta.ap_rate_idx;
 		else
 			rate_idx = wlvif->sta.basic_rate_idx;
 	} else {
 		if (hlid == wlvif->ap.global_hlid)
 			rate_idx = wlvif->ap.mgmt_rate_idx;
-		else if (hlid == wlvif->ap.bcast_hlid)
+		else if (hlid == wlvif->ap.bcast_hlid ||
+			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+			 !ieee80211_is_data(frame_control))
+			/*
+			 * send non-data, bcast and EAPOLs using the
+			 * min basic rate
+			 */
 			rate_idx = wlvif->ap.bcast_rate_idx;
 		else
 			rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -330,32 +362,43 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	    ieee80211_has_protected(frame_control))
 		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
 
-	desc->reserved = 0;
+	/* send EAPOL frames as voice */
+	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
+		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
+
 	desc->tx_attr = cpu_to_le16(tx_attr);
+
+	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
 	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
 }
 
 /* caller must hold wl->mutex */
 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-				   struct sk_buff *skb, u32 buf_offset)
+				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
 {
 	struct ieee80211_tx_info *info;
 	u32 extra = 0;
 	int ret = 0;
 	u32 total_len;
-	u8 hlid;
 	bool is_dummy;
+	bool is_gem = false;
 
-	if (!skb)
+	if (!skb) {
+		wl1271_error("discarding null skb");
+		return -EINVAL;
+	}
+
+	if (hlid == WL12XX_INVALID_LINK_ID) {
+		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
 		return -EINVAL;
+	}
 
 	info = IEEE80211_SKB_CB(skb);
 
-	/* TODO: handle dummy packets on multi-vifs */
 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
 
-	if (info->control.hw_key &&
+	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+	    info->control.hw_key &&
 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
 		extra = WL1271_EXTRA_SPACE_TKIP;
 
@@ -367,27 +410,25 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
 			 (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-		if (unlikely(is_wep && wlvif->default_key != idx)) {
+		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
 			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
 			if (ret < 0)
 				return ret;
 			wlvif->default_key = idx;
 		}
-	}
-
-	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
-	if (hlid == WL12XX_INVALID_LINK_ID) {
-		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
-		return -EINVAL;
+
+		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
 	}
 
-	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
+	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
+				 is_gem);
 	if (ret < 0)
 		return ret;
 
 	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
 	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
-		wl1271_tx_ap_update_inconnection_sta(wl, skb);
+		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
 		wl1271_tx_regulate_link(wl, wlvif, hlid);
 	}
@@ -425,10 +466,10 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 		rate_set >>= 1;
 	}
 
-	/* MCS rates indication are on bits 16 - 23 */
+	/* MCS rates indication are on bits 16 - 31 */
 	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
 
-	for (bit = 0; bit < 8; bit++) {
+	for (bit = 0; bit < 16; bit++) {
 		if (rate_set & 0x1)
 			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
 		rate_set >>= 1;
@@ -439,24 +480,23 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
 
 void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
 {
-	unsigned long flags;
 	int i;
+	struct wl12xx_vif *wlvif;
 
-	for (i = 0; i < NUM_TX_QUEUES; i++) {
-		if (test_bit(i, &wl->stopped_queues_map) &&
-		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
-			/* firmware buffer has space, restart queues */
-			spin_lock_irqsave(&wl->wl_lock, flags);
-			ieee80211_wake_queue(wl->hw,
-					     wl1271_tx_get_mac80211_queue(i));
-			clear_bit(i, &wl->stopped_queues_map);
-			spin_unlock_irqrestore(&wl->wl_lock, flags);
+	wl12xx_for_each_wlvif(wl, wlvif) {
+		for (i = 0; i < NUM_TX_QUEUES; i++) {
+			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
+					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
+			    wlvif->tx_queue_count[i] <=
+					WL1271_TX_QUEUE_LOW_WATERMARK)
+				/* firmware buffer has space, restart queues */
+				wlcore_wake_queue(wl, wlvif, i,
+					WLCORE_QUEUE_STOP_REASON_WATERMARK);
 		}
 	}
 }
 
-static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
-						struct sk_buff_head *queues)
+static int wlcore_select_ac(struct wl1271 *wl)
 {
 	int i, q = -1, ac;
 	u32 min_pkts = 0xffffffff;
@@ -470,60 +510,77 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
 	 */
 	for (i = 0; i < NUM_TX_QUEUES; i++) {
 		ac = wl1271_tx_get_queue(i);
-		if (!skb_queue_empty(&queues[ac]) &&
-		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
+		if (wl->tx_queue_count[ac] &&
+		    wl->tx_allocated_pkts[ac] < min_pkts) {
 			q = ac;
 			min_pkts = wl->tx_allocated_pkts[q];
 		}
 	}
 
-	if (q == -1)
-		return NULL;
-
-	return &queues[q];
+	return q;
 }
 
-static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
-					      struct wl1271_link *lnk)
+static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
+					  struct wl1271_link *lnk, u8 q)
 {
 	struct sk_buff *skb;
 	unsigned long flags;
-	struct sk_buff_head *queue;
 
-	queue = wl1271_select_queue(wl, lnk->tx_queue);
-	if (!queue)
-		return NULL;
-
-	skb = skb_dequeue(queue);
+	skb = skb_dequeue(&lnk->tx_queue[q]);
 	if (skb) {
-		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
 		wl->tx_queue_count[q]--;
+		if (lnk->wlvif) {
+			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
+			lnk->wlvif->tx_queue_count[q]--;
+		}
 		spin_unlock_irqrestore(&wl->wl_lock, flags);
 	}
 
 	return skb;
 }
 
-static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
-					      struct wl12xx_vif *wlvif)
+static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
+						    u8 hlid, u8 ac,
+						    u8 *low_prio_hlid)
+{
+	struct wl1271_link *lnk = &wl->links[hlid];
+
+	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
+		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
+		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
+		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
+			/* we found the first non-empty low priority queue */
+			*low_prio_hlid = hlid;
+
+		return NULL;
+	}
+
+	return wlcore_lnk_dequeue(wl, lnk, ac);
+}
+
+static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
+						    struct wl12xx_vif *wlvif,
+						    u8 ac, u8 *hlid,
+						    u8 *low_prio_hlid)
 {
 	struct sk_buff *skb = NULL;
 	int i, h, start_hlid;
 
 	/* start from the link after the last one */
-	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
+	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
 
 	/* dequeue according to AC, round robin on each link */
-	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
-		h = (start_hlid + i) % WL12XX_MAX_LINKS;
+	for (i = 0; i < wl->num_links; i++) {
+		h = (start_hlid + i) % wl->num_links;
 
 		/* only consider connected stations */
 		if (!test_bit(h, wlvif->links_map))
 			continue;
 
-		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
+						   low_prio_hlid);
 		if (!skb)
 			continue;
@@ -534,53 +591,90 @@ static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
 	if (!skb)
 		wlvif->last_tx_hlid = 0;
 
+	*hlid = wlvif->last_tx_hlid;
 	return skb;
 }
 
-static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
 {
 	unsigned long flags;
 	struct wl12xx_vif *wlvif = wl->last_wlvif;
 	struct sk_buff *skb = NULL;
+	int ac;
+	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
+
+	ac = wlcore_select_ac(wl);
+	if (ac < 0)
+		goto out;
 
 	/* continue from last wlvif (round robin) */
 	if (wlvif) {
 		wl12xx_for_each_wlvif_continue(wl, wlvif) {
-			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
-			if (skb) {
-				wl->last_wlvif = wlvif;
-				break;
-			}
+			if (!wlvif->tx_queue_count[ac])
+				continue;
+
+			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+							   &low_prio_hlid);
+			if (!skb)
+				continue;
+
+			wl->last_wlvif = wlvif;
+			break;
 		}
 	}
 
 	/* dequeue from the system HLID before the restarting wlvif list */
-	if (!skb)
-		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
+	if (!skb) {
+		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
+						   ac, &low_prio_hlid);
+		if (skb) {
+			*hlid = wl->system_hlid;
+			wl->last_wlvif = NULL;
+		}
+	}
 
-	/* do a new pass over the wlvif list */
+	/* Do a new pass over the wlvif list. But no need to continue
+	 * after last_wlvif. The previous pass should have found it. */
 	if (!skb) {
 		wl12xx_for_each_wlvif(wl, wlvif) {
-			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+			if (!wlvif->tx_queue_count[ac])
+				goto next;
+
+			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
+							   &low_prio_hlid);
 			if (skb) {
 				wl->last_wlvif = wlvif;
 				break;
 			}
 
-			/*
-			 * No need to continue after last_wlvif. The previous
-			 * pass should have found it.
-			 */
+next:
 			if (wlvif == wl->last_wlvif)
 				break;
 		}
 	}
 
+	/* no high priority skbs found - but maybe a low priority one? */
+	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
+		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
+		skb = wlcore_lnk_dequeue(wl, lnk, ac);
+
+		WARN_ON(!skb); /* we checked this before */
+		*hlid = low_prio_hlid;
+
+		/* ensure proper round robin in the vif/link levels */
+		wl->last_wlvif = lnk->wlvif;
+		if (lnk->wlvif)
+			lnk->wlvif->last_tx_hlid = low_prio_hlid;
+	}
+
+out:
 	if (!skb &&
 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
 		int q;
 
 		skb = wl->dummy_packet;
+		*hlid = wl->system_hlid;
 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
 		spin_lock_irqsave(&wl->wl_lock, flags);
 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
@@ -592,7 +686,7 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
 }
 
 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
-				  struct sk_buff *skb)
+				  struct sk_buff *skb, u8 hlid)
 {
 	unsigned long flags;
 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -600,16 +694,17 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	if (wl12xx_is_dummy_packet(wl, skb)) {
 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
 	} else {
-		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
 		/* make sure we dequeue the same packet next time */
-		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
-				      WL12XX_MAX_LINKS;
+		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
+				      wl->num_links;
 	}
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
 	wl->tx_queue_count[q]++;
+	if (wlvif)
+		wlvif->tx_queue_count[q]++;
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 }
 
@@ -636,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
 	timeout = wl->conf.rx_streaming.duration;
 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
 		bool found = false;
-		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+		for_each_set_bit(hlid, active_hlids, wl->num_links) {
 			if (test_bit(hlid, wlvif->links_map)) {
 				found = true;
 				break;
@@ -656,37 +751,58 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
 	}
 }
 
-void wl1271_tx_work_locked(struct wl1271 *wl)
+/*
+ * Returns failure values only in case of failed bus ops within this function.
+ * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
+ * triggering recovery by higher layers when not necessary.
+ * If a FW command fails within wl1271_prepare_tx_frame, a recovery
+ * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
+ * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
+ * within prepare_tx_frame code but there's nothing we should do about those
+ * as well.
+ */
+int wlcore_tx_work_locked(struct wl1271 *wl)
 {
 	struct wl12xx_vif *wlvif;
 	struct sk_buff *skb;
 	struct wl1271_tx_hw_descr *desc;
-	u32 buf_offset = 0;
+	u32 buf_offset = 0, last_len = 0;
 	bool sent_packets = false;
-	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
-	int ret;
+	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
+	int ret = 0;
+	int bus_ret = 0;
+	u8 hlid;
 
-	if (unlikely(wl->state == WL1271_STATE_OFF))
-		return;
+	if (unlikely(wl->state != WLCORE_STATE_ON))
+		return 0;
 
-	while ((skb = wl1271_skb_dequeue(wl))) {
+	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 		bool has_data = false;
 
 		wlvif = NULL;
-		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+		if (!wl12xx_is_dummy_packet(wl, skb))
 			wlvif = wl12xx_vif_to_data(info->control.vif);
+		else
+			hlid = wl->system_hlid;
 
 		has_data = wlvif && wl1271_tx_is_data_present(skb);
-		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
+		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
+					      hlid);
 		if (ret == -EAGAIN) {
 			/*
 			 * Aggregation buffer is full.
 			 * Flush buffer and try again.
 			 */
-			wl1271_skb_queue_head(wl, wlvif, skb);
-			wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
-					  buf_offset, true);
+			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
+
+			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
+							    last_len);
+			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
+						    wl->aggr_buf, buf_offset,
+						    true);
+			if (bus_ret < 0)
+				goto out;
+
 			sent_packets = true;
 			buf_offset = 0;
 			continue;
@@ -695,7 +811,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 			 * Firmware buffer is full.
 			 * Queue back last skb, and stop aggregating.
 			 */
-			wl1271_skb_queue_head(wl, wlvif, skb);
+			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 			/* No work left, avoid scheduling redundant tx work */
 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 			goto out_ack;
@@ -705,12 +821,13 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
 				 * fw still expects dummy packet,
 				 * so re-enqueue it
 				 */
-				wl1271_skb_queue_head(wl, wlvif, skb);
+				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
 			else
 				ieee80211_free_txskb(wl->hw, skb);
 			goto out_ack;
 		}
-		buf_offset += ret;
+		last_len = ret;
+		buf_offset += last_len;
 
 		wl->tx_packets_count++;
 		if (has_data) {
 			desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -720,8 +837,12 @@ out_ack:
 	if (buf_offset) {
-		wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
-				  buf_offset, true);
+		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
+		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+					    buf_offset, true);
+		if (bus_ret < 0)
+			goto out;
+
 		sent_packets = true;
 	}
 	if (sent_packets) {
@@ -729,13 +850,19 @@ out_ack:
 		 * Interrupt the firmware with the new packets. This is only
 		 * required for older hardware revisions
 		 */
-		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
-			wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
-				       wl->tx_packets_count);
+		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
+			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
+						 wl->tx_packets_count);
+			if (bus_ret < 0)
+				goto out;
+		}
 
 		wl1271_handle_tx_low_watermark(wl);
 	}
 	wl12xx_rearm_rx_streaming(wl, active_hlids);
+
+out:
+	return bus_ret;
 }
 
 void wl1271_tx_work(struct work_struct *work)
@@ -748,7 +875,11 @@ void wl1271_tx_work(struct work_struct *work)
 	if (ret < 0)
 		goto out;
 
-	wl1271_tx_work_locked(wl);
+	ret = wlcore_tx_work_locked(wl);
+	if (ret < 0) {
+		wl12xx_queue_recovery_work(wl);
+		goto out;
+	}
 
 	wl1271_ps_elp_sleep(wl);
 out:
@@ -826,30 +957,12 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 
 	wl->stats.retry_count += result->ack_failures;
 
-	/*
-	 * update sequence number only when relevant, i.e. only in
-	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
-	 */
-	if (info->control.hw_key &&
-	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
-	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
-	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
-		u8 fw_lsb = result->tx_security_sequence_number_lsb;
-		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
-
-		/*
-		 * update security sequence number, taking care of potential
-		 * wrap-around
-		 */
-		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
-		wlvif->tx_security_last_seq_lsb = fw_lsb;
-	}
-
 	/* remove private header from packet */
 	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
 
 	/* remove TKIP header space if present */
-	if (info->control.hw_key &&
+	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+	    info->control.hw_key &&
 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
 		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
@@ -869,22 +982,27 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
 }
 
 /* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl)
+int wlcore_tx_complete(struct wl1271 *wl)
 {
-	struct wl1271_acx_mem_map *memmap =
-		(struct wl1271_acx_mem_map *)wl->target_mem_map;
+	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
 	u32 count, fw_counter;
 	u32 i;
+	int ret;
 
 	/* read the tx results from the chipset */
-	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
-		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
+			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+	if (ret < 0)
+		goto out;
+
 	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
 
 	/* write host counter to chipset (to ack) */
-	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
-		       offsetof(struct wl1271_tx_hw_res_if,
-				tx_result_host_counter), fw_counter);
+	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
+			     offsetof(struct wl1271_tx_hw_res_if,
+				      tx_result_host_counter), fw_counter);
+	if (ret < 0)
+		goto out;
 
 	count = fw_counter - wl->tx_results_count;
 	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
@@ -904,8 +1022,11 @@ void wl1271_tx_complete(struct wl1271 *wl)
 
 		wl->tx_results_count++;
 	}
+
+out:
+	return ret;
 }
-EXPORT_SYMBOL(wl1271_tx_complete);
+EXPORT_SYMBOL(wlcore_tx_complete);
 
 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 {
@@ -914,10 +1035,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 	unsigned long flags;
 	struct ieee80211_tx_info *info;
 	int total[NUM_TX_QUEUES];
+	struct wl1271_link *lnk = &wl->links[hlid];
 
 	for (i = 0; i < NUM_TX_QUEUES; i++) {
 		total[i] = 0;
-		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
+		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
 			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
 
 			if (!wl12xx_is_dummy_packet(wl, skb)) {
@@ -932,8 +1054,11 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
 	}
 
 	spin_lock_irqsave(&wl->wl_lock, flags);
-	for (i = 0; i < NUM_TX_QUEUES; i++)
+	for (i = 0; i < NUM_TX_QUEUES; i++) {
 		wl->tx_queue_count[i] -= total[i];
+		if (lnk->wlvif)
+			lnk->wlvif->tx_queue_count[i] -= total[i];
+	}
 	spin_unlock_irqrestore(&wl->wl_lock, flags);
 
 	wl1271_handle_tx_low_watermark(wl);
@@ -945,43 +1070,43 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 	int i;
 
 	/* TX failure */
-	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
-		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
+		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
+		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
+			/* this calls wl12xx_free_link */
 			wl1271_free_sta(wl, wlvif, i);
-		else
-			wlvif->sta.ba_rx_bitmap = 0;
-
-		wl->links[i].allocated_pkts = 0;
-		wl->links[i].prev_freed_pkts = 0;
+		} else {
+			u8 hlid = i;
+			wl12xx_free_link(wl, wlvif, &hlid);
+		}
 	}
 	wlvif->last_tx_hlid = 0;
+
+	for (i = 0; i < NUM_TX_QUEUES; i++)
+		wlvif->tx_queue_count[i] = 0;
 }
 
 /* caller must hold wl->mutex and TX must be stopped */
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset(struct wl1271 *wl)
 {
 	int i;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
 
 	/* only reset the queues if something bad happened */
-	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
-		for (i = 0; i < WL12XX_MAX_LINKS; i++)
+	if (wl1271_tx_total_queue_count(wl) != 0) {
+		for (i = 0; i < wl->num_links; i++)
 			wl1271_tx_reset_link_queues(wl, i);
 
 		for (i = 0; i < NUM_TX_QUEUES; i++)
 			wl->tx_queue_count[i] = 0;
 	}
 
-	wl->stopped_queues_map = 0;
-
 	/*
 	 * Make sure the driver is at a consistent state, in case this
 	 * function is called from a context other than interface removal.
 	 * This call will always wake the TX queues.
 	 */
-	if (reset_tx_queues)
-		wl1271_handle_tx_low_watermark(wl);
+	wl1271_handle_tx_low_watermark(wl);
 
 	for (i = 0; i < wl->num_tx_desc; i++) {
 		if (wl->tx_frames[i] == NULL)
@@ -998,7 +1123,8 @@ void wl12xx_tx_reset(struct wl1271 *wl)
 		 */
 		info = IEEE80211_SKB_CB(skb);
 		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
-		if (info->control.hw_key &&
+		if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+		    info->control.hw_key &&
 		    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
 			int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -1020,32 +1146,57 @@ void wl12xx_tx_reset(struct wl1271 *wl)
 /* caller must *NOT* hold wl->mutex */
 void wl1271_tx_flush(struct wl1271 *wl)
 {
-	unsigned long timeout;
+	unsigned long timeout, start_time;
 	int i;
 
-	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+	start_time = jiffies;
+	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+
+	/* only one flush should be in progress, for consistent queue state */
+	mutex_lock(&wl->flush_mutex);
+
+	mutex_lock(&wl->mutex);
+	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
+		mutex_unlock(&wl->mutex);
+		goto out;
+	}
+
+	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
 
 	while (!time_after(jiffies, timeout)) {
-		mutex_lock(&wl->mutex);
-		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
+		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
 			     wl->tx_frames_cnt,
 			     wl1271_tx_total_queue_count(wl));
+
+		/* force Tx and give the driver some time to flush data */
+		mutex_unlock(&wl->mutex);
+		if (wl1271_tx_total_queue_count(wl))
+			wl1271_tx_work(&wl->tx_work);
+		msleep(20);
+		mutex_lock(&wl->mutex);
+
 		if ((wl->tx_frames_cnt == 0) &&
 		    (wl1271_tx_total_queue_count(wl) == 0)) {
-			mutex_unlock(&wl->mutex);
-			return;
+			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
+				     jiffies_to_msecs(jiffies - start_time));
+			goto out_wake;
 		}
-		mutex_unlock(&wl->mutex);
-		msleep(1);
 	}
 
-	wl1271_warning("Unable to flush all TX buffers, timed out.");
+	wl1271_warning("Unable to flush all TX buffers, "
+		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);
 
 	/* forcibly flush all Tx buffers on our queues */
-	mutex_lock(&wl->mutex);
-	for (i = 0; i < WL12XX_MAX_LINKS; i++)
+	for (i = 0; i < wl->num_links; i++)
 		wl1271_tx_reset_link_queues(wl, i);
+
+out_wake:
+	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
 	mutex_unlock(&wl->mutex);
+
+out:
+	mutex_unlock(&wl->flush_mutex);
 }
+EXPORT_SYMBOL_GPL(wl1271_tx_flush);
 
 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
 {
@@ -1054,3 +1205,125 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
 
 	return BIT(__ffs(rate_set));
 }
+EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
+
+void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			      u8 queue, enum wlcore_queue_stop_reason reason)
+{
+	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+	bool stopped = !!wl->queue_stop_reasons[hwq];
+
+	/* queue should not be stopped for this reason */
+	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
+
+	if (stopped)
+		return;
+
+	ieee80211_stop_queue(wl->hw, hwq);
+}
+
+void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
+		       enum wlcore_queue_stop_reason reason)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
+		       enum wlcore_queue_stop_reason reason)
+{
+	unsigned long flags;
+	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+
+	/* queue should not be clear for this reason */
+	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
+
+	if (wl->queue_stop_reasons[hwq])
+		goto out;
+
+	ieee80211_wake_queue(wl->hw, hwq);
+
+out:
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_stop_queues(struct wl1271 *wl,
+			enum wlcore_queue_stop_reason reason)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+
+	/* mark all possible queues as stopped */
+	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+		WARN_ON_ONCE(test_and_set_bit(reason,
+					      &wl->queue_stop_reasons[i]));
+
+	/* use the global version to make sure all vifs in mac80211 we don't
+	 * know are stopped.
+	 */
+	ieee80211_stop_queues(wl->hw);
+
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_wake_queues(struct wl1271 *wl,
+			enum wlcore_queue_stop_reason reason)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+
+	/* mark all possible queues as awake */
+	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
+		WARN_ON_ONCE(!test_and_clear_bit(reason,
+						 &wl->queue_stop_reasons[i]));
+
+	/* use the global version to make sure all vifs in mac80211 we don't
+	 * know are woken up.
+	 */
+	ieee80211_wake_queues(wl->hw);
+
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
+				       struct wl12xx_vif *wlvif, u8 queue,
+				       enum wlcore_queue_stop_reason reason)
+{
+	unsigned long flags;
+	bool stopped;
+
+	spin_lock_irqsave(&wl->wl_lock, flags);
+	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
+							   reason);
+	spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+	return stopped;
+}
+
+bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
+					      struct wl12xx_vif *wlvif,
+					      u8 queue,
+					      enum wlcore_queue_stop_reason reason)
+{
+	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+	assert_spin_locked(&wl->wl_lock);
+	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
+}
+
+bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+				    u8 queue)
+{
+	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
+
+	assert_spin_locked(&wl->wl_lock);
+	return !!wl->queue_stop_reasons[hwq];
+}
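
A few of the mechanisms this patch introduces are easier to see in isolation. First, the queue stop/wake helpers added at the end of the diff replace the old single stopped_queues_map bitmap with a per-hw-queue bitmask of stop reasons (watermark, flush, and so on): several independent subsystems can stop the same queue, and it is only woken once the last reason is cleared. The following is a minimal standalone sketch of that reference-counted-by-reason pattern; the names and the scenario in main() are illustrative, not taken from the driver:

#include <stdbool.h>
#include <stdio.h>

/* one bit per independent reason a queue may be halted */
enum stop_reason {
	STOP_REASON_WATERMARK,
	STOP_REASON_FLUSH,
	STOP_REASON_SPARE_BLK,
};

#define NUM_HW_QUEUES 4

static unsigned long queue_stop_reasons[NUM_HW_QUEUES];

/* stop hwq for one reason; only the first reason actually halts it */
static void stop_queue(int hwq, enum stop_reason reason)
{
	bool was_stopped = queue_stop_reasons[hwq] != 0;

	queue_stop_reasons[hwq] |= 1UL << reason;
	if (!was_stopped)
		printf("hwq %d halted\n", hwq);    /* ieee80211_stop_queue */
}

/* wake hwq for one reason; it runs again only when no reason is left */
static void wake_queue(int hwq, enum stop_reason reason)
{
	queue_stop_reasons[hwq] &= ~(1UL << reason);
	if (queue_stop_reasons[hwq] == 0)
		printf("hwq %d running\n", hwq);   /* ieee80211_wake_queue */
}

int main(void)
{
	stop_queue(0, STOP_REASON_FLUSH);     /* halts the queue */
	stop_queue(0, STOP_REASON_WATERMARK); /* already halted, no-op */
	wake_queue(0, STOP_REASON_FLUSH);     /* watermark still set */
	wake_queue(0, STOP_REASON_WATERMARK); /* last reason gone: runs */
	return 0;
}

The WARN_ON_ONCE() calls in the real helpers police the same invariant the sketch relies on: stopping or waking a queue twice for the same reason indicates unbalanced callers.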
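Second, wlcore_vif_dequeue_high_prio() keeps the pre-existing fairness scheme: each pass starts from the link after the one served last (wlvif->last_tx_hlid), so one busy link cannot starve the others. A minimal sketch of that rotation, with hypothetical per-link packet counts standing in for the per-link skb queues:

#include <stdio.h>

#define NUM_LINKS 8

/* pending packets per link; values here are made up for the demo */
static int tx_queue_len[NUM_LINKS] = { 0, 3, 0, 1, 0, 0, 2, 0 };
static int last_tx_hlid;

/* pick the next link to serve, scanning round robin starting from the
 * link after the one served last; return -1 once every queue is empty */
static int dequeue_link(void)
{
	int start = (last_tx_hlid + 1) % NUM_LINKS;

	for (int i = 0; i < NUM_LINKS; i++) {
		int h = (start + i) % NUM_LINKS;

		if (tx_queue_len[h] > 0) {
			tx_queue_len[h]--;
			last_tx_hlid = h;
			return h;
		}
	}
	last_tx_hlid = 0;  /* nothing queued: restart the rotation */
	return -1;
}

int main(void)
{
	int h;

	/* rotates across links 1, 3 and 6 until their queues drain,
	 * rather than serving link 1 three times in a row */
	while ((h = dequeue_link()) >= 0)
		printf("dequeued from link %d\n", h);
	return 0;
}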
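Finally, the reworked wlcore_calc_packet_alignment() chooses between 4-byte and bus-block alignment from two quirk bits: chips that pad only the last frame of an aggregate (WLCORE_QUIRK_TX_PAD_LAST_FRAME) no longer need every frame rounded up to a full bus block. A self-contained sketch of just that decision, under the assumption of a 512-byte bus block; the constants and quirk values are illustrative:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a is a power of 2 */
#define TX_ALIGN_TO	4
#define BUS_BLOCK_SIZE	512

/* stand-ins for the wl->quirks flag bits */
#define QUIRK_TX_BLOCKSIZE_ALIGN	(1 << 0)
#define QUIRK_TX_PAD_LAST_FRAME		(1 << 1)

static unsigned int calc_packet_alignment(unsigned long quirks,
					  unsigned int len)
{
	/* pad-last-frame chips defer block padding to the end of the
	 * aggregate, so each frame only needs 4-byte alignment */
	if ((quirks & QUIRK_TX_PAD_LAST_FRAME) ||
	    !(quirks & QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(len, TX_ALIGN_TO);

	return ALIGN(len, BUS_BLOCK_SIZE);
}

int main(void)
{
	printf("%u\n", calc_packet_alignment(0, 1501));			  /* 1504 */
	printf("%u\n", calc_packet_alignment(QUIRK_TX_BLOCKSIZE_ALIGN, 1501)); /* 1536 */
	return 0;
}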
