Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
 drivers/net/wireless/rt2x00/rt2x00queue.c | 813 +++++++++++++++++++----------
 1 file changed, 587 insertions(+), 226 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a3d79c7a21c..5642ccceca7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -15,9 +15,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
@@ -33,9 +31,10 @@
#include "rt2x00.h"
#include "rt2x00lib.h"
-struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
+struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct data_queue *queue = entry->queue;
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
struct sk_buff *skb;
struct skb_frame_desc *skbdesc;
unsigned int frame_size;
@@ -46,7 +45,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
* The frame size includes descriptor size, because the
* hardware receives the frame directly into the skbuffer.
*/
- frame_size = entry->queue->data_size + entry->queue->desc_size;
+ frame_size = queue->data_size + queue->desc_size + queue->winfo_size;
/*
* The payload should be aligned to a 4-byte boundary,
@@ -60,7 +59,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
* at least 8 bytes available in headroom for IV/EIV
* and 8 bytes for ICV data as tailroom.
*/
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
head_size += 8;
tail_size += 8;
}
@@ -68,7 +67,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
/*
* Allocate skbuffer.
*/
- skb = dev_alloc_skb(frame_size + head_size + tail_size);
+ skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
if (!skb)
return NULL;
@@ -86,25 +85,36 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = entry;
- if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
- skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
- skb->data,
- skb->len,
- DMA_FROM_DEVICE);
+ if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
+ dma_addr_t skb_dma;
+
+ skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ skbdesc->skb_dma = skb_dma;
skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
}
return skb;
}
-void rt2x00queue_map_txskb(struct queue_entry *entry)
+int rt2x00queue_map_txskb(struct queue_entry *entry)
{
struct device *dev = entry->queue->rt2x00dev->dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
skbdesc->skb_dma =
dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma)))
+ return -ENOMEM;
+
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
+ return 0;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
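
Both the RX-skb allocation above and rt2x00queue_map_txskb() now validate the result of dma_map_single() with dma_mapping_error() instead of assuming the mapping succeeded, and unwind (free the skb, or return -ENOMEM) on failure. A minimal sketch of that pattern follows; the function and parameter names are illustrative, only the dma_map_single()/dma_mapping_error() calls are taken from the patch:

#include <linux/dma-mapping.h>

/* Sketch: map a buffer for device access, failing cleanly when the
 * IOMMU or swiotlb cannot provide a mapping. */
static int example_map_for_dma(struct device *dev, void *buf, size_t len,
                               dma_addr_t *dma_out)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* The handle must be checked before first use; on failure
         * there is nothing to unmap. */
        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        *dma_out = dma;
        return 0;
}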
@@ -148,19 +158,6 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
skb_trim(skb, frame_length);
}
-void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
-{
- unsigned int frame_length = skb->len;
- unsigned int align = ALIGN_SIZE(skb, header_length);
-
- if (!align)
- return;
-
- skb_push(skb, align);
- memmove(skb->data, skb->data + align, frame_length);
- skb_trim(skb, frame_length);
-}
-
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
unsigned int payload_length = skb->len - header_length;
@@ -199,33 +196,51 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- unsigned int l2pad = L2PAD_SIZE(header_length);
+ /*
+ * L2 padding is only present if the skb contains more than just the
+ * IEEE 802.11 header.
+ */
+ unsigned int l2pad = (skb->len > header_length) ?
+ L2PAD_SIZE(header_length) : 0;
if (!l2pad)
return;
- memmove(skb->data + header_length, skb->data + header_length + l2pad,
- skb->len - header_length - l2pad);
-
- skb_trim(skb, skb->len - l2pad);
+ memmove(skb->data + l2pad, skb->data, header_length);
+ skb_pull(skb, l2pad);
}
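
The rewritten rt2x00queue_remove_l2pad() exploits the fact that the pad sits between header and payload: moving the short 802.11 header forward by l2pad bytes and advancing skb->data with skb_pull() is cheaper than the old approach of moving the entire payload backward and trimming. A sketch of the assumed pad computation (mirroring the L2PAD_SIZE definition in rt2x00.h) with a worked example:

/* Assumed to match rt2x00.h: bytes needed after the header so the
 * payload starts on a 4-byte boundary. */
#define L2PAD_SIZE(__hdrlen)    (-(__hdrlen) & 3)

/*
 * Example: a 26-byte QoS data header gives L2PAD_SIZE(26) == 2, so the
 * frame is laid out as [26-byte header][2-byte pad][payload]. Removal
 * memmove()s the 26-byte header forward by 2 bytes and then pulls
 * skb->data by 2 bytes, leaving header and payload contiguous.
 */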
-static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
- unsigned long irqflags;
+ u16 seqno;
- if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
- unlikely(!tx_info->control.vif))
+ if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
+ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+
+ if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
+ /*
+ * rt2800 has an H/W (or F/W) bug: the device incorrectly
+ * increases the seqno on retransmitted data (non-QOS) frames.
+ * To work around the problem, let's generate the seqno in
+ * software if QOS is disabled.
+ */
+ if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
+ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+ else
+ /* H/W will generate sequence number */
+ return;
+ }
+
/*
- * Hardware should insert sequence counter.
- * FIXME: We insert a software sequence counter first for
- * hardware that doesn't support hardware sequence counting.
+ * The hardware is not able to insert a sequence number. Assign a
+ * software generated one here.
*
* This is wrong because beacons are not getting sequence
* numbers assigned properly.
@@ -234,43 +249,50 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
* sequence counting per-frame, since those will override the
* sequence counter given by mac80211.
*/
- spin_lock_irqsave(&intf->seqlock, irqflags);
-
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
- intf->seqno += 0x10;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
+ seqno = atomic_add_return(0x10, &intf->seqno);
+ else
+ seqno = atomic_read(&intf->seqno);
- spin_unlock_irqrestore(&intf->seqlock, irqflags);
-
- __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seqno);
}
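
The per-interface sequence counter no longer needs intf->seqlock: since the sequence number occupies bits 4-15 of seq_ctrl, atomic_add_return(0x10, ...) bumps it by one and hands back the new value in a single atomic step, while later fragments of the same MSDU just read the current value. A self-contained sketch of the same lock-free pattern (function and parameter names are illustrative):

#include <linux/atomic.h>
#include <linux/ieee80211.h>

/* Sketch: lock-free 802.11 sequence number assignment. */
static void example_assign_seqno(atomic_t *ctr, struct ieee80211_hdr *hdr,
                                 bool first_fragment)
{
        u16 seqno;

        if (first_fragment)
                seqno = atomic_add_return(0x10, ctr); /* next number */
        else
                seqno = atomic_read(ctr);             /* reuse number */

        /* Keep the fragment bits, replace the sequence bits. */
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(seqno);
}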
-static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc,
const struct rt2x00_rate *hwrate)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
unsigned int data_length;
unsigned int duration;
unsigned int residual;
+ /*
+ * Determine with what IFS priority this frame should be sent.
+ * Set ifs to IFS_SIFS when this is not the first fragment,
+ * or this fragment came after RTS/CTS.
+ */
+ if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+ txdesc->u.plcp.ifs = IFS_BACKOFF;
+ else
+ txdesc->u.plcp.ifs = IFS_SIFS;
+
/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
- data_length = entry->skb->len + 4;
- data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
+ data_length = skb->len + 4;
+ data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
/*
* PLCP setup
* Length calculation depends on OFDM/CCK rate.
*/
- txdesc->signal = hwrate->plcp;
- txdesc->service = 0x04;
+ txdesc->u.plcp.signal = hwrate->plcp;
+ txdesc->u.plcp.service = 0x04;
if (hwrate->flags & DEV_RATE_OFDM) {
- txdesc->length_high = (data_length >> 6) & 0x3f;
- txdesc->length_low = data_length & 0x3f;
+ txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
+ txdesc->u.plcp.length_low = data_length & 0x3f;
} else {
/*
* Convert length to microseconds.
@@ -285,38 +307,132 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* Check if we need to set the Length Extension
*/
if (hwrate->bitrate == 110 && residual <= 30)
- txdesc->service |= 0x80;
+ txdesc->u.plcp.service |= 0x80;
}
- txdesc->length_high = (duration >> 8) & 0xff;
- txdesc->length_low = duration & 0xff;
+ txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
+ txdesc->u.plcp.length_low = duration & 0xff;
/*
* When preamble is enabled we should set the
* preamble bit for the signal.
*/
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- txdesc->signal |= 0x08;
+ txdesc->u.plcp.signal |= 0x08;
}
}
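
For CCK rates the PLCP LENGTH field carries the frame duration in microseconds, split across length_high/length_low above. Assuming the usual rt2x00 duration helpers (with bitrate expressed in 100 kbit/s units, as elsewhere in the driver), the elided conversion works roughly like this worked example:

/* Assumed to mirror rt2x00lib.h: air time of __size bytes at __speed
 * (in 100 kbit/s units), and the rounding remainder. */
#define GET_DURATION(__size, __speed)     (((__size) * 8 * 10) / (__speed))
#define GET_DURATION_RES(__size, __speed) (((__size) * 8 * 10) % (__speed))

/*
 * Example: data_length = 100 bytes at 11 Mbit/s (bitrate == 110):
 *   duration = 8000 / 110 = 72 us, residual = 80,
 * so duration is rounded up to 73 us. The length-extension bit
 * (service |= 0x80) is only set at 11 Mbit/s when residual <= 30.
 */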
-static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
- struct txentry_desc *txdesc)
+static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
+ struct txentry_desc *txdesc,
+ struct ieee80211_sta *sta,
+ const struct rt2x00_rate *hwrate)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
- struct ieee80211_rate *rate =
- ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
- const struct rt2x00_rate *hwrate;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rt2x00_sta *sta_priv = NULL;
+
+ if (sta) {
+ txdesc->u.ht.mpdu_density =
+ sta->ht_cap.ampdu_density;
+
+ sta_priv = sta_to_rt2x00_sta(sta);
+ txdesc->u.ht.wcid = sta_priv->wcid;
+ }
+
+ /*
+ * If IEEE80211_TX_RC_MCS is set, txrate->idx just contains the
+ * MCS rate to be used.
+ */
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ txdesc->u.ht.mcs = txrate->idx;
+
+ /*
+ * MIMO PS should be set to 1 for STAs using dynamic SM PS
+ * when using more than one tx stream (>MCS7).
+ */
+ if (sta && txdesc->u.ht.mcs > 7 &&
+ sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+ __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
+ } else {
+ txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ txdesc->u.ht.mcs |= 0x08;
+ }
+
+ if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
+ if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ txdesc->u.ht.txop = TXOP_SIFS;
+ else
+ txdesc->u.ht.txop = TXOP_BACKOFF;
+
+ /* Leave all other settings zeroed. */
+ return;
+ }
+
+ txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
+
+ /*
+ * Only one STBC stream is supported for now.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+ txdesc->u.ht.stbc = 1;
+
+ /*
+ * This frame is eligible for an AMPDU; however, don't aggregate
+ * frames that are intended to probe a specific tx rate.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+ __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+
+ /*
+ * Set 40 MHz mode if necessary (for legacy rates this will
+ * duplicate the frame to both channels).
+ */
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
+ txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+ __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+ if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+ __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+ /*
+ * Determine IFS values
+ * - Use TXOP_BACKOFF for management frames except beacons
+ * - Use TXOP_SIFS for fragment bursts
+ * - Use TXOP_HTTXOP for everything else
+ *
+ * Note: rt2800 devices won't use CTS protection (if used)
+ * for frames not transmitted with TXOP_HTTXOP
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_beacon(hdr->frame_control))
+ txdesc->u.ht.txop = TXOP_BACKOFF;
+ else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ txdesc->u.ht.txop = TXOP_SIFS;
+ else
+ txdesc->u.ht.txop = TXOP_HTTXOP;
+}
+
+static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
+ struct txentry_desc *txdesc,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_rate *rate;
+ const struct rt2x00_rate *hwrate = NULL;
memset(txdesc, 0, sizeof(*txdesc));
/*
* Header and frame information.
*/
- txdesc->length = entry->skb->len;
- txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+ txdesc->length = skb->len;
+ txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
/*
* Check whether this frame is to be acked.
@@ -362,42 +478,44 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame, except for a frame that has been injected
- * through a monitor interface. This latter is needed for testing a
- * monitor interface.
+ * to be inserted into the frame.
*/
- if ((ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control)) &&
- (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
- /*
- * Determine with what IFS priority this frame should be send.
- * Set ifs to IFS_SIFS when the this is not the first fragment,
- * or this fragment came after RTS/CTS.
- */
if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
- !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
+ !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
- txdesc->ifs = IFS_BACKOFF;
- } else
- txdesc->ifs = IFS_SIFS;
/*
* Determine rate modulation.
*/
- hwrate = rt2x00_get_rate(rate->hw_value);
- txdesc->rate_mode = RATE_MODE_CCK;
- if (hwrate->flags & DEV_RATE_OFDM)
- txdesc->rate_mode = RATE_MODE_OFDM;
+ if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
+ else if (txrate->flags & IEEE80211_TX_RC_MCS)
+ txdesc->rate_mode = RATE_MODE_HT_MIX;
+ else {
+ rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
+ hwrate = rt2x00_get_rate(rate->hw_value);
+ if (hwrate->flags & DEV_RATE_OFDM)
+ txdesc->rate_mode = RATE_MODE_OFDM;
+ else
+ txdesc->rate_mode = RATE_MODE_CCK;
+ }
/*
* Apply TX descriptor handling by components
*/
- rt2x00crypto_create_tx_descriptor(entry, txdesc);
- rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
- rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
- rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
+ rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
+ rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
+
+ if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+ rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
+ sta, hwrate);
+ else
+ rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
+ hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -412,18 +530,18 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
*/
if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
rt2x00dev->ops->lib->get_entry_state(entry))) {
- ERROR(rt2x00dev,
- "Corrupt queue %d, accessing entry which is not ours.\n"
- "Please file bug report to %s.\n",
- entry->queue->qid, DRV_PROJECT);
+ rt2x00_err(rt2x00dev,
+ "Corrupt queue %d, accessing entry which is not ours\n"
+ "Please file bug report to %s\n",
+ entry->queue->qid, DRV_PROJECT);
return -EINVAL;
}
/*
* Add the requested extra tx headroom in front of the skb.
*/
- skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
- memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
+ skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
+ memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
/*
* Call the driver's write_tx_data function, if it exists.
@@ -434,8 +552,9 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
/*
* Map the skb to DMA.
*/
- if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
- rt2x00queue_map_txskb(entry);
+ if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+ rt2x00queue_map_txskb(entry))
+ return -ENOMEM;
return 0;
}
@@ -468,37 +587,67 @@ static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
*/
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
- queue->rt2x00dev->ops->lib->kick_tx_queue(queue);
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+}
+
+static void rt2x00queue_bar_check(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ struct ieee80211_bar *bar = (void *) (entry->skb->data +
+ rt2x00dev->extra_tx_headroom);
+ struct rt2x00_bar_list_entry *bar_entry;
+
+ if (likely(!ieee80211_is_back_req(bar->frame_control)))
+ return;
+
+ bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC);
+
+ /*
+ * If the alloc fails we still send the BAR out, but just don't track
+ * it in our bar list. As a result we will report it back to mac80211
+ * as failed.
+ */
+ if (!bar_entry)
+ return;
+
+ bar_entry->entry = entry;
+ bar_entry->block_acked = 0;
+
+ /*
+ * Copy the relevant parts of the 802.11 BAR into our check list,
+ * such that we can use RCU for low overhead in the RX path, since
+ * sending BARs and processing the corresponding BlockAck should be
+ * the exception.
+ */
+ memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra));
+ memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta));
+ bar_entry->control = bar->control;
+ bar_entry->start_seq_num = bar->start_seq_num;
+
+ /*
+ * Insert BAR into our BAR check list.
+ */
+ spin_lock_bh(&rt2x00dev->bar_list_lock);
+ list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list);
+ spin_unlock_bh(&rt2x00dev->bar_list_lock);
}
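
rt2x00queue_bar_check() publishes every outgoing BAR on an RCU-protected list so the RX path can match incoming BlockAcks without taking bar_list_lock. A hedged sketch of what such a reader might look like; the real matching code (in the TX-done/RX path, not shown in this diff) also compares ta, control and start_seq_num:

#include <linux/etherdevice.h>
#include <linux/rculist.h>

/* Sketch: mark a pending BAR as block-acked, lock-free on the RX side. */
static bool example_mark_bar_acked(struct rt2x00_dev *rt2x00dev, const u8 *ra)
{
        struct rt2x00_bar_list_entry *bar;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(bar, &rt2x00dev->bar_list, list) {
                if (!ether_addr_equal(bar->ra, ra))
                        continue;
                bar->block_acked = 1;
                found = true;
                break;
        }
        rcu_read_unlock();

        return found;
}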
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
- bool local)
+ struct ieee80211_sta *sta, bool local)
{
struct ieee80211_tx_info *tx_info;
- struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ struct queue_entry *entry;
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
u8 rate_idx, rate_flags;
-
- if (unlikely(rt2x00queue_full(queue)))
- return -ENOBUFS;
-
- if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
- &entry->flags))) {
- ERROR(queue->rt2x00dev,
- "Arrived at non-free entry in the non-full queue %d.\n"
- "Please file bug report to %s.\n",
- queue->qid, DRV_PROJECT);
- return -EINVAL;
- }
+ int ret = 0;
/*
* Copy all TX descriptor information into txdesc;
* after that we are free to use the skb->cb array
* for our information.
*/
- entry->skb = skb;
- rt2x00queue_create_tx_descriptor(entry, &txdesc);
+ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta);
/*
* All information is retrieved from the skb->cb array,
@@ -510,7 +659,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rate_flags = tx_info->control.rates[0].flags;
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->entry = entry;
skbdesc->tx_rate_idx = rate_idx;
skbdesc->tx_rate_flags = rate_flags;
@@ -524,24 +672,51 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
*/
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
- if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
+ if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
rt2x00crypto_tx_copy_iv(skb, &txdesc);
else
rt2x00crypto_tx_remove_iv(skb, &txdesc);
}
/*
- * When DMA allocation is required we should guarentee to the
+ * When DMA allocation is required we should guarantee to the
* driver that the DMA is aligned to a 4-byte boundary.
* However some drivers require L2 padding to pad the payload
* rather than the header. This could be a requirement for
* PCI and USB devices, while header alignment only is valid
* for PCI devices.
*/
- if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
- rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
- else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
- rt2x00queue_align_frame(entry->skb);
+ if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
+ rt2x00queue_insert_l2pad(skb, txdesc.header_length);
+ else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+ rt2x00queue_align_frame(skb);
+
+ /*
+ * This function must be called with bottom halves disabled.
+ */
+ spin_lock(&queue->tx_lock);
+
+ if (unlikely(rt2x00queue_full(queue))) {
+ rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
+ queue->qid);
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+
+ if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
+ &entry->flags))) {
+ rt2x00_err(queue->rt2x00dev,
+ "Arrived at non-free entry in the non-full queue %d\n"
+ "Please file bug report to %s\n",
+ queue->qid, DRV_PROJECT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skbdesc->entry = entry;
+ entry->skb = skb;
/*
* It could be possible that the queue was corrupted and this
@@ -551,25 +726,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
- return -EIO;
+ ret = -EIO;
+ goto out;
}
+ /*
+ * Put BlockAckReqs into our check list for driver BA processing.
+ */
+ rt2x00queue_bar_check(entry);
+
set_bit(ENTRY_DATA_PENDING, &entry->flags);
- rt2x00queue_index_inc(queue, Q_INDEX);
+ rt2x00queue_index_inc(entry, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
rt2x00queue_kick_tx_queue(queue, &txdesc);
- return 0;
+out:
+ spin_unlock(&queue->tx_lock);
+ return ret;
}
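
Because rt2x00queue_write_tx_frame() takes queue->tx_lock with a plain spin_lock(), the comment inside it ("must be called with bottom halves disabled") is a real API contract: mac80211's TX path already runs in softirq context, but any other caller has to disable bottom halves itself. A sketch of such a caller (example_inject_frame is hypothetical):

/* Sketch: calling the TX path from outside mac80211's softirq context. */
static int example_inject_frame(struct data_queue *queue, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
        local_bh_enable();

        return ret;
}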
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
{
struct rt2x00_intf *intf = vif_to_intf(vif);
- struct skb_frame_desc *skbdesc;
- struct txentry_desc txdesc;
if (unlikely(!intf->beacon))
return -ENOBUFS;
@@ -581,24 +761,43 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
rt2x00queue_free_skb(intf->beacon);
- if (!enable_beacon) {
- rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
- mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
- }
+ /*
+ * Clear beacon (single bssid devices don't need to clear the beacon
+ * since the beacon queue will get stopped anyway).
+ */
+ if (rt2x00dev->ops->lib->clear_beacon)
+ rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+ mutex_unlock(&intf->beacon_skb_mutex);
+
+ return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ struct skb_frame_desc *skbdesc;
+ struct txentry_desc txdesc;
+
+ if (unlikely(!intf->beacon))
+ return -ENOBUFS;
+
+ /*
+ * Clean up the beacon skb.
+ */
+ rt2x00queue_free_skb(intf->beacon);
intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
- if (!intf->beacon->skb) {
- mutex_unlock(&intf->beacon_skb_mutex);
+ if (!intf->beacon->skb)
return -ENOMEM;
- }
/*
* Copy all TX descriptor information into txdesc;
* after that we are free to use the skb->cb array
* for our information.
*/
- rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
+ rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL);
/*
* Fill in skb descriptor
@@ -608,19 +807,33 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
skbdesc->entry = intf->beacon;
/*
- * Send beacon to hardware and enable beacon genaration..
+ * Send beacon to hardware.
*/
rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
+ return 0;
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ int ret;
+
+ mutex_lock(&intf->beacon_skb_mutex);
+ ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
+ return ret;
}
-void rt2x00queue_for_each_entry(struct data_queue *queue,
+bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- void (*fn)(struct queue_entry *entry))
+ void *data,
+ bool (*fn)(struct queue_entry *entry,
+ void *data))
{
unsigned long irqflags;
unsigned int index_start;
@@ -628,10 +841,10 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
unsigned int i;
if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
- ERROR(queue->rt2x00dev,
- "Entry requested from invalid index range (%d - %d)\n",
- start, end);
- return;
+ rt2x00_err(queue->rt2x00dev,
+ "Entry requested from invalid index range (%d - %d)\n",
+ start, end);
+ return true;
}
/*
@@ -646,44 +859,29 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
spin_unlock_irqrestore(&queue->index_lock, irqflags);
/*
- * Start from the TX done pointer, this guarentees that we will
+ * Start from the TX done pointer, this guarantees that we will
* send out all frames in the correct order.
*/
if (index_start < index_end) {
- for (i = index_start; i < index_end; i++)
- fn(&queue->entries[i]);
+ for (i = index_start; i < index_end; i++) {
+ if (fn(&queue->entries[i], data))
+ return true;
+ }
} else {
- for (i = index_start; i < queue->limit; i++)
- fn(&queue->entries[i]);
+ for (i = index_start; i < queue->limit; i++) {
+ if (fn(&queue->entries[i], data))
+ return true;
+ }
- for (i = 0; i < index_end; i++)
- fn(&queue->entries[i]);
+ for (i = 0; i < index_end; i++) {
+ if (fn(&queue->entries[i], data))
+ return true;
+ }
}
-}
-EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
-
-struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
- const enum data_queue_qid queue)
-{
- int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
-
- if (queue == QID_RX)
- return rt2x00dev->rx;
-
- if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
- return &rt2x00dev->tx[queue];
-
- if (!rt2x00dev->bcn)
- return NULL;
- if (queue == QID_BEACON)
- return &rt2x00dev->bcn[0];
- else if (queue == QID_ATIM && atim)
- return &rt2x00dev->bcn[1];
-
- return NULL;
+ return false;
}
-EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
+EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
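
The iterator now threads an opaque data pointer through to the callback and aborts the walk as soon as the callback returns true (the return value tells the caller whether the walk was cut short). A usage sketch under those semantics; the callback and counter are illustrative, the index names come from rt2x00queue.h:

/* Sketch: count entries still owned by the hardware, never aborting. */
static bool example_count_owned(struct queue_entry *entry, void *data)
{
        unsigned int *count = data;

        if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                (*count)++;

        return false;   /* false == keep iterating */
}

static unsigned int example_owned_entries(struct data_queue *queue)
{
        unsigned int owned = 0;

        /* Walk from the last completed entry up to the current index. */
        rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
                                   &owned, example_count_owned);
        return owned;
}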
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
enum queue_index index)
@@ -692,8 +890,8 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
unsigned long irqflags;
if (unlikely(index >= Q_INDEX_MAX)) {
- ERROR(queue->rt2x00dev,
- "Entry requested from invalid index type (%d)\n", index);
+ rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n",
+ index);
return NULL;
}
@@ -707,13 +905,14 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
-void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
+ struct data_queue *queue = entry->queue;
unsigned long irqflags;
if (unlikely(index >= Q_INDEX_MAX)) {
- ERROR(queue->rt2x00dev,
- "Index change on invalid index type (%d)\n", index);
+ rt2x00_err(queue->rt2x00dev,
+ "Index change on invalid index type (%d)\n", index);
return;
}
@@ -723,7 +922,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
if (queue->index[index] >= queue->limit)
queue->index[index] = 0;
- queue->last_action[index] = jiffies;
+ entry->last_action = jiffies;
if (index == Q_INDEX) {
queue->length++;
@@ -735,6 +934,181 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
+static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
+{
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to disable the queue
+ * inside mac80211.
+ */
+ ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ default:
+ break;
+ }
+}
+
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ rt2x00queue_pause_queue_nocheck(queue);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
+
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ switch (queue->qid) {
+ case QID_AC_VO:
+ case QID_AC_VI:
+ case QID_AC_BE:
+ case QID_AC_BK:
+ /*
+ * For TX queues, we have to enable the queue
+ * inside mac80211.
+ */
+ ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+ break;
+ case QID_RX:
+ /*
+ * For RX we need to kick the queue now in order to
+ * receive frames.
+ */
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
+ set_bit(QUEUE_PAUSED, &queue->flags);
+
+ queue->rt2x00dev->ops->lib->start_queue(queue);
+
+ rt2x00queue_unpause_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
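
Note the ordering in rt2x00queue_start_queue(): setting QUEUE_PAUSED before invoking the driver's start_queue callback makes the final rt2x00queue_unpause_queue() find the bit set and take its normal wake path, so a freshly started queue is woken through the same code as every other unpause instead of needing a special case.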
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+ mutex_lock(&queue->status_lock);
+
+ if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+ mutex_unlock(&queue->status_lock);
+ return;
+ }
+
+ rt2x00queue_pause_queue_nocheck(queue);
+
+ queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+ mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
+
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
+{
+ bool tx_queue =
+ (queue->qid == QID_AC_VO) ||
+ (queue->qid == QID_AC_VI) ||
+ (queue->qid == QID_AC_BE) ||
+ (queue->qid == QID_AC_BK);
+
+ /*
+ * If we are not supposed to drop any pending
+ * frames, this means we must force a start (=kick)
+ * to the queue to make sure the hardware will
+ * start transmitting.
+ */
+ if (!drop && tx_queue)
+ queue->rt2x00dev->ops->lib->kick_queue(queue);
+
+ /*
+ * Check if driver supports flushing, if that is the case we can
+ * defer the flushing to the driver. Otherwise we must use the
+ * alternative which just waits for the queue to become empty.
+ */
+ if (likely(queue->rt2x00dev->ops->lib->flush_queue))
+ queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
+
+ /*
+ * The queue flush has failed...
+ */
+ if (unlikely(!rt2x00queue_empty(queue)))
+ rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
+ queue->qid);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_start_queue will call ieee80211_wake_queue
+ * for each queue after it has been properly initialized.
+ */
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_start_queue(queue);
+
+ rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
+
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * rt2x00queue_stop_queue will call ieee80211_stop_queue
+ * as well, but we are completely shutting down everything
+ * now, so it is much safer to stop all TX queues at once,
+ * and use rt2x00queue_stop_queue for cleaning up.
+ */
+ ieee80211_stop_queues(rt2x00dev->hw);
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_stop_queue(queue);
+
+ rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue)
+ rt2x00queue_flush_queue(queue, drop);
+
+ rt2x00queue_flush_queue(rt2x00dev->rx, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
+
static void rt2x00queue_reset(struct data_queue *queue)
{
unsigned long irqflags;
@@ -745,22 +1119,12 @@ static void rt2x00queue_reset(struct data_queue *queue)
queue->count = 0;
queue->length = 0;
- for (i = 0; i < Q_INDEX_MAX; i++) {
+ for (i = 0; i < Q_INDEX_MAX; i++)
queue->index[i] = 0;
- queue->last_action[i] = jiffies;
- }
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- txall_queue_for_each(rt2x00dev, queue)
- rt2x00dev->ops->lib->kill_tx_queue(queue);
-}
-
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
@@ -769,16 +1133,12 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
queue_for_each(rt2x00dev, queue) {
rt2x00queue_reset(queue);
- for (i = 0; i < queue->limit; i++) {
+ for (i = 0; i < queue->limit; i++)
rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
- if (queue->qid == QID_RX)
- rt2x00queue_index_inc(queue, Q_INDEX);
- }
}
}
-static int rt2x00queue_alloc_entries(struct data_queue *queue,
- const struct data_queue_desc *qdesc)
+static int rt2x00queue_alloc_entries(struct data_queue *queue)
{
struct queue_entry *entries;
unsigned int entry_size;
@@ -786,15 +1146,10 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
rt2x00queue_reset(queue);
- queue->limit = qdesc->entry_num;
- queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
- queue->data_size = qdesc->data_size;
- queue->desc_size = qdesc->desc_size;
-
/*
* Allocate all queue entries.
*/
- entry_size = sizeof(*entries) + qdesc->priv_size;
+ entry_size = sizeof(*entries) + queue->priv_size;
entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -810,7 +1165,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
entries[i].entry_idx = i;
entries[i].priv_data =
QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
- sizeof(*entries), qdesc->priv_size);
+ sizeof(*entries), queue->priv_size);
}
#undef QUEUE_ENTRY_PRIV_OFFSET
@@ -838,7 +1193,7 @@ static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
struct sk_buff *skb;
for (i = 0; i < queue->limit; i++) {
- skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
+ skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
if (!skb)
return -ENOMEM;
queue->entries[i].skb = skb;
@@ -852,23 +1207,22 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
int status;
- status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
+ status = rt2x00queue_alloc_entries(rt2x00dev->rx);
if (status)
goto exit;
tx_queue_for_each(rt2x00dev, queue) {
- status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
+ status = rt2x00queue_alloc_entries(queue);
if (status)
goto exit;
}
- status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
+ status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
if (status)
goto exit;
- if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
- status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
- rt2x00dev->ops->atim);
+ if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
+ status = rt2x00queue_alloc_entries(rt2x00dev->atim);
if (status)
goto exit;
}
@@ -880,7 +1234,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
return 0;
exit:
- ERROR(rt2x00dev, "Queue entries allocation failed.\n");
+ rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
rt2x00queue_uninitialize(rt2x00dev);
@@ -902,6 +1256,8 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue, enum data_queue_qid qid)
{
+ mutex_init(&queue->status_lock);
+ spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->index_lock);
queue->rt2x00dev = rt2x00dev;
@@ -910,6 +1266,10 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
queue->aifs = 2;
queue->cw_min = 5;
queue->cw_max = 10;
+
+ rt2x00dev->ops->queue_init(queue);
+
+ queue->threshold = DIV_ROUND_UP(queue->limit, 10);
}
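
With queue->limit now filled in by the driver's queue_init() callback, the threshold works out to one tenth of the queue, rounded up: a 64-entry TX queue, for instance, gets DIV_ROUND_UP(64, 10) = 7, so rt2x00queue_threshold() (in rt2x00queue.h) treats the queue as nearly full once fewer than 7 entries remain free.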
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
@@ -917,7 +1277,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
enum data_queue_qid qid;
unsigned int req_atim =
- !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+ !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
/*
* We need the following queues:
@@ -930,7 +1290,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
if (!queue) {
- ERROR(rt2x00dev, "Queue allocation failed.\n");
+ rt2x00_err(rt2x00dev, "Queue allocation failed\n");
return -ENOMEM;
}
@@ -940,11 +1300,12 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->rx = queue;
rt2x00dev->tx = &queue[1];
rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
+ rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
/*
* Initialize queue parameters.
* RX: qid = QID_RX
- * TX: qid = QID_AC_BE + index
+ * TX: qid = QID_AC_VO + index
* TX: cw_min: 2^5 = 32.
* TX: cw_max: 2^10 = 1024.
* BCN: qid = QID_BEACON
@@ -952,13 +1313,13 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
*/
rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
- qid = QID_AC_BE;
+ qid = QID_AC_VO;
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_init(rt2x00dev, queue, qid++);
- rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
+ rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
if (req_atim)
- rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);
+ rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
return 0;
}
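
After this change the single kcalloc'ed array is addressed through four pointers instead of overloading bcn[1] for the ATIM queue. For a typical device with four TX queues the layout becomes (a sketch; tx_queues varies per chipset):

/*
 * queue[0]              rt2x00dev->rx     QID_RX
 * queue[1] .. queue[4]  rt2x00dev->tx     QID_AC_VO, VI, BE, BK
 * queue[5]              rt2x00dev->bcn    QID_BEACON
 * queue[6]              rt2x00dev->atim   QID_ATIM (only with
 *                                         REQUIRE_ATIM_QUEUE)
 */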