Diffstat (limited to 'drivers/net/wireless/ath/ath5k/qcu.c')
-rw-r--r--  drivers/net/wireless/ath/ath5k/qcu.c  922
1 file changed, 552 insertions, 370 deletions
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a8556f4..0583c69d26d 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,65 +17,200 @@
*/
/********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
\********************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
-#include "base.h"
+#include <linux/log2.h>
-/*
- * Get properties for a transmit queue
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we set up parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them, so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DCF parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs, allowing us to have different DCF settings for each queue.
+ *
+ * When a frame goes into a TX queue, the QCU decides when it will trigger a
+ * transmission based on various criteria (such as how much data is in its
+ * buffer or, if it is a beacon queue, whether it is time to fire up the queue
+ * based on the TSF etc.), the DCU adds backoff, IFSes etc. and then a
+ * scheduler (arbitrator) decides the priority of each QCU based on its
+ * configuration (e.g. beacons are always transmitted when they leave the DCU,
+ * bypassing all other frames from other queues waiting to be transmitted).
+ * After a frame leaves the DCU it goes to the PCU for further processing and
+ * then to the PHY for the actual transmission.
+ */
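For context, a minimal sketch of how a driver might bring one data queue up using the helpers in this file; the WME subtype, the AIFS/CW numbers and the ah pointer are illustrative assumptions, not values taken from this diff:

	/* Hedged sketch: bring up one best-effort data queue (illustrative only). */
	struct ath5k_txq_info qinfo = {
		.tqi_type    = AR5K_TX_QUEUE_DATA,
		.tqi_subtype = AR5K_WME_AC_BE,	/* assumed WME access class */
		.tqi_aifs    = 2,		/* assumed EDCA parameters */
		.tqi_cw_min  = 15,
		.tqi_cw_max  = 1023,
	};
	int q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qinfo);

	if (q >= 0) {
		/* Tweak parameters later if needed, then (re)program
		 * the QCU/DCU registers for that queue. */
		ath5k_hw_set_tx_queueprops(ah, q, &qinfo);
		ath5k_hw_reset_tx_queue(ah, q);
	}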
+
+
+/******************\
+* Helper functions *
+\******************/
+
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+{
+ u32 pending;
+ AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
+
+ /* Return if queue is declared inactive */
+ if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ return false;
+
+ /* XXX: How about AR5K_CFG_TXCNT ? */
+ if (ah->ah_version == AR5K_AR5210)
+ return false;
+
+ pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
+ pending &= AR5K_QCU_STS_FRMPENDCNT;
+
+ /* It's possible to have no frames pending even if TXE
+ * is set. To indicate that the queue has not stopped,
+ * return true */
+ if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
+ return true;
+
+ return pending;
+}
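A hedged sketch of how callers typically use the return value above, polling until a queue drains before releasing it; the loop bound and delay are assumptions for illustration:

	/* Sketch: bounded wait for a queue to drain, then mark it inactive. */
	int i;

	for (i = 1000; i > 0 && ath5k_hw_num_tx_pending(ah, q) != 0; i--)
		udelay(100);
	ath5k_hw_release_tx_queue(ah, q);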
+
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ */
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+{
+ if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
+ return;
+
+ /* This queue will be skipped in further operations */
+ ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
+ /*For SIMR setup*/
+ AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
+}
+
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
+ * Make sure cw is a power of 2 minus 1 and smaller than 1024
+ */
+static u16
+ath5k_cw_validate(u16 cw_req)
+{
+ cw_req = min(cw_req, (u16)1023);
+
+ /* Check if cw_req + 1 is a power of 2 */
+ if (is_power_of_2(cw_req + 1))
+ return cw_req;
+
+ /* Check if cw_req is a power of 2 */
+ if (is_power_of_2(cw_req))
+ return cw_req - 1;
+
+ /* If none of the above holds, round up
+ * to the closest power of 2, minus one */
+ cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+ return cw_req;
+}
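As a quick sanity check of the rounding rule above, a stand-alone user-space sketch that mirrors it (illustrative re-implementation, not driver code):

	#include <stdio.h>
	#include <stdint.h>

	/* Mirror of the cw validation rule: clamp to 1023, then map the
	 * request to a (2^n - 1) value the same way the function above does. */
	static uint16_t cw_validate(uint16_t cw)
	{
		uint16_t v = 1;

		if (cw > 1023)
			cw = 1023;
		if (((cw + 1) & cw) == 0)	/* cw + 1 already a power of 2 */
			return cw;
		if ((cw & (cw - 1)) == 0)	/* cw itself a power of 2 */
			return cw - 1;
		while (v < cw)			/* round up to 2^n - 1 */
			v = (v << 1) | 1;
		return v;
	}

	int main(void)
	{
		/* Prints: 15 15 127 1023 */
		printf("%u %u %u %u\n", cw_validate(15), cw_validate(16),
		       cw_validate(100), cw_validate(5000));
		return 0;
	}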
+
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
+ */
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
- ATH5K_TRACE(ah->ah_sc);
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
*/
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
- const struct ath5k_txq_info *queue_info)
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+ const struct ath5k_txq_info *qinfo)
{
- ATH5K_TRACE(ah->ah_sc);
+ struct ath5k_txq_info *qi;
+
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
- if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ qi = &ah->ah_txq[queue];
+
+ if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EIO;
- memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
+ /* copy and validate values */
+ qi->tqi_type = qinfo->tqi_type;
+ qi->tqi_subtype = qinfo->tqi_subtype;
+ qi->tqi_flags = qinfo->tqi_flags;
+ /*
+ * According to the docs: Although the AIFS field is 8 bit wide,
+ * the maximum supported value is 0xFC. Setting it higher than that
+ * will cause the DCU to hang.
+ */
+ qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
+ qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
+ qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
+ qi->tqi_cbr_period = qinfo->tqi_cbr_period;
+ qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
+ qi->tqi_burst_time = qinfo->tqi_burst_time;
+ qi->tqi_ready_time = qinfo->tqi_ready_time;
/*XXX: Is this supported on 5210 ?*/
- if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
- ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
- (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
- queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
- ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
+ /*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
+ if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
+ ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
+ (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
+ qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
+ qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
return 0;
}
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
*/
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
int ret;
- ATH5K_TRACE(ah->ah_sc);
-
/*
* Get queue by type
*/
- /*5210 only has 2 queues*/
- if (ah->ah_version == AR5K_AR5210) {
+ /* 5210 only has 2 queues */
+ if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
@@ -107,13 +242,6 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
case AR5K_TX_QUEUE_CAB:
queue = AR5K_TX_QUEUE_ID_CAB;
break;
- case AR5K_TX_QUEUE_XR_DATA:
- if (ah->ah_version != AR5K_AR5212)
- ATH5K_ERR(ah->ah_sc,
- "XR data queues only supported in"
- " 5212!\n");
- queue = AR5K_TX_QUEUE_ID_XR_DATA;
- break;
default:
return -EINVAL;
}
@@ -142,414 +270,468 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
return queue;
}
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
- */
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
-{
- u32 pending;
- ATH5K_TRACE(ah->ah_sc);
- AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
-
- /* Return if queue is declared inactive */
- if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
- return false;
-
- /* XXX: How about AR5K_CFG_TXCNT ? */
- if (ah->ah_version == AR5K_AR5210)
- return false;
-
- pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
- pending &= AR5K_QCU_STS_FRMPENDCNT;
-
- /* It's possible to have no frames pending even if TXE
- * is set. To indicate that q has not stopped return
- * true */
- if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
- return true;
- return pending;
-}
+/*******************************\
+* Single QCU/DCU initialization *
+\*******************************/
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
*/
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue)
{
- ATH5K_TRACE(ah->ah_sc);
- if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
- return;
+ /* Single data queue on AR5210 */
+ if (ah->ah_version == AR5K_AR5210) {
+ struct ath5k_txq_info *tq = &ah->ah_txq[queue];
- /* This queue will be skipped in further operations */
- ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
- /*For SIMR setup*/
- AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
+ if (queue > 0)
+ return;
+
+ ath5k_hw_reg_write(ah,
+ (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_LG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SH_RETRY),
+ AR5K_NODCU_RETRY_LMT);
+ /* DCU on AR5211+ */
+ } else {
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_RTS)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_STA_RTS)
+ | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+ AR5K_DCU_RETRY_LMT_STA_DATA),
+ AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
+ }
}
-/*
- * Set DFS properties for a transmit queue on DCU
+/**
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * Set DCF properties for the given transmit queue on DCU
+ * and configures all queue-specific parameters.
*/
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
- u32 cw_min, cw_max, retry_lg, retry_sh;
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
- ATH5K_TRACE(ah->ah_sc);
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
tq = &ah->ah_txq[queue];
- if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
+ /* Skip if queue inactive or if we are on AR5210
+ * that doesn't have QCU/DCU */
+ if ((ah->ah_version == AR5K_AR5210) ||
+ (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
return 0;
- if (ah->ah_version == AR5K_AR5210) {
- /* Only handle data queues, others will be ignored */
- if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
- return 0;
-
- /* Set Slot time */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
- AR5K_SLOT_TIME);
- /* Set ACK_CTS timeout */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
- AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
- /* Set Transmit Latency */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_TRANSMIT_LATENCY_TURBO :
- AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
-
- /* Set IFS0 */
- if (ah->ah_turbo) {
- ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
- (ah->ah_aifs + tq->tqi_aifs) *
- AR5K_INIT_SLOT_TIME_TURBO) <<
- AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
- AR5K_IFS0);
- } else {
- ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
- (ah->ah_aifs + tq->tqi_aifs) *
- AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
- AR5K_INIT_SIFS, AR5K_IFS0);
- }
-
- /* Set IFS1 */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
- AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
- /* Set AR5K_PHY_SETTLING */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
- | 0x38 :
- (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
- | 0x1C,
- AR5K_PHY_SETTLING);
- /* Set Frame Control Register */
- ath5k_hw_reg_write(ah, ah->ah_turbo ?
- (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
- AR5K_PHY_TURBO_SHORT | 0x2020) :
- (AR5K_PHY_FRAME_CTL_INI | 0x1020),
- AR5K_PHY_FRAME_CTL_5210);
- }
-
/*
- * Calculate cwmin/max by channel mode
+ * Set contention window (cw_min/cw_max)
+ * and arbitrated interframe space (aifs)...
*/
- cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
- cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
- ah->ah_aifs = AR5K_TUNE_AIFS;
- /*XR is only supported on 5212*/
- if (IS_CHAN_XR(ah->ah_current_channel) &&
- ah->ah_version == AR5K_AR5212) {
- cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
- cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
- ah->ah_aifs = AR5K_TUNE_AIFS_XR;
- /*B mode is not supported on 5210*/
- } else if (IS_CHAN_B(ah->ah_current_channel) &&
- ah->ah_version != AR5K_AR5210) {
- cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
- cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
- ah->ah_aifs = AR5K_TUNE_AIFS_11B;
- }
+ ath5k_hw_reg_write(ah,
+ AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
+ AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
+ AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
+ AR5K_QUEUE_DFS_LOCAL_IFS(queue));
- cw_min = 1;
- while (cw_min < ah->ah_cw_min)
- cw_min = (cw_min << 1) | 1;
+ /*
+ * Set tx retry limits for this queue
+ */
+ ath5k_hw_set_tx_retry_limits(ah, queue);
- cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
- ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
- cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
- ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
/*
- * Calculate and set retry limits
+ * Set misc registers
*/
- if (ah->ah_software_retry) {
- /* XXX Need to test this */
- retry_lg = ah->ah_limit_tx_retries;
- retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
- AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
- } else {
- retry_lg = AR5K_INIT_LG_RETRY;
- retry_sh = AR5K_INIT_SH_RETRY;
- }
- /*No QCU/DCU [5210]*/
- if (ah->ah_version == AR5K_AR5210) {
- ath5k_hw_reg_write(ah,
- (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
- | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_NODCU_RETRY_LMT_SLG_RETRY)
- | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_NODCU_RETRY_LMT_SSH_RETRY)
- | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
- | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
- AR5K_NODCU_RETRY_LMT);
- } else {
- /*QCU/DCU [5211+]*/
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_DCU_RETRY_LMT_SLG_RETRY) |
- AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_DCU_RETRY_LMT_SSH_RETRY) |
- AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
- AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
- AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
+ /* Enable DCU to wait for next fragment from QCU */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_DCU_MISC_FRAG_WAIT);
- /*===Rest is also for QCU/DCU only [5211+]===*/
+ /* On Maui and Spirit use the global seqnum on DCU */
+ if (ah->ah_mac_version < AR5K_SREV_AR5211)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_DCU_MISC_SEQNUM_CTL);
+
+ /* Constant bit rate period */
+ if (tq->tqi_cbr_period) {
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
+ AR5K_QCU_CBRCFG_INTVAL) |
+ AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
+ AR5K_QCU_CBRCFG_ORN_THRES),
+ AR5K_QUEUE_CBRCFG(queue));
- /*
- * Set initial content window (cw_min/cw_max)
- * and arbitrated interframe space (aifs)...
- */
- ath5k_hw_reg_write(ah,
- AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
- AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
- AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
- AR5K_DCU_LCL_IFS_AIFS),
- AR5K_QUEUE_DFS_LOCAL_IFS(queue));
-
- /*
- * Set misc registers
- */
- /* Enable DCU early termination for this queue */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_DCU_EARLY);
+ AR5K_QCU_MISC_FRSHED_CBR);
- /* Enable DCU to wait for next fragment from QCU */
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
- AR5K_DCU_MISC_FRAG_WAIT);
-
- /* On Maui and Spirit use the global seqnum on DCU */
- if (ah->ah_mac_version < AR5K_SREV_AR5211)
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
- AR5K_DCU_MISC_SEQNUM_CTL);
-
- if (tq->tqi_cbr_period) {
- ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
- AR5K_QCU_CBRCFG_INTVAL) |
- AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
- AR5K_QCU_CBRCFG_ORN_THRES),
- AR5K_QUEUE_CBRCFG(queue));
+ if (tq->tqi_cbr_overflow_limit)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_CBR);
- if (tq->tqi_cbr_overflow_limit)
- AR5K_REG_ENABLE_BITS(ah,
- AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_CBR_THRES_ENABLE);
- }
+ }
+
+ /* Ready time interval */
+ if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
+ AR5K_QCU_RDYTIMECFG_INTVAL) |
+ AR5K_QCU_RDYTIMECFG_ENABLE,
+ AR5K_QUEUE_RDYTIMECFG(queue));
+
+ if (tq->tqi_burst_time) {
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
+ AR5K_DCU_CHAN_TIME_DUR) |
+ AR5K_DCU_CHAN_TIME_ENABLE,
+ AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
- if (tq->tqi_ready_time &&
- (tq->tqi_type != AR5K_TX_QUEUE_CAB))
- ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
- AR5K_QCU_RDYTIMECFG_INTVAL) |
- AR5K_QCU_RDYTIMECFG_ENABLE,
- AR5K_QUEUE_RDYTIMECFG(queue));
-
- if (tq->tqi_burst_time) {
- ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
- AR5K_DCU_CHAN_TIME_DUR) |
- AR5K_DCU_CHAN_TIME_ENABLE,
- AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
-
- if (tq->tqi_flags
- & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
- AR5K_REG_ENABLE_BITS(ah,
- AR5K_QUEUE_MISC(queue),
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_RDY_VEOL_POLICY);
- }
+ }
- if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
- ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
- AR5K_QUEUE_DFS_MISC(queue));
+ /* Enable/disable Post frame backoff */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
+ ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
+ AR5K_QUEUE_DFS_MISC(queue));
- if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
- ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
- AR5K_QUEUE_DFS_MISC(queue));
+ /* Enable/disable fragmentation burst backoff */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
+ ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
+ AR5K_QUEUE_DFS_MISC(queue));
- /*
- * Set registers by queue type
- */
- switch (tq->tqi_type) {
- case AR5K_TX_QUEUE_BEACON:
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ /*
+ * Set registers by queue type
+ */
+ switch (tq->tqi_type) {
+ case AR5K_TX_QUEUE_BEACON:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_BCN_DIS |
AR5K_QCU_MISC_BCN_ENABLE);
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S) |
AR5K_DCU_MISC_ARBLOCK_IGNORE |
AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
AR5K_DCU_MISC_BCN_ENABLE);
- break;
+ break;
- case AR5K_TX_QUEUE_CAB:
- /* XXX: use BCN_SENT_GT, if we can figure out how */
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_DBA_GT |
- AR5K_QCU_MISC_CBREXP_DIS |
- AR5K_QCU_MISC_CBREXP_BCN_DIS);
+ case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
+ AR5K_QCU_MISC_CBREXP_DIS |
+ AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
- (AR5K_TUNE_SW_BEACON_RESP -
- AR5K_TUNE_DMA_BEACON_RESP) -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
+ (AR5K_TUNE_SW_BEACON_RESP -
+ AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
- AR5K_QCU_RDYTIMECFG_ENABLE,
- AR5K_QUEUE_RDYTIMECFG(queue));
+ AR5K_QCU_RDYTIMECFG_ENABLE,
+ AR5K_QUEUE_RDYTIMECFG(queue));
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
- (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
- AR5K_DCU_MISC_ARBLOCK_CTL_S));
- break;
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
+ (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
+ AR5K_DCU_MISC_ARBLOCK_CTL_S));
+ break;
- case AR5K_TX_QUEUE_UAPSD:
- AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_CBREXP_DIS);
- break;
+ case AR5K_TX_QUEUE_UAPSD:
+ AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
+ AR5K_QCU_MISC_CBREXP_DIS);
+ break;
- case AR5K_TX_QUEUE_DATA:
- default:
+ case AR5K_TX_QUEUE_DATA:
+ default:
break;
- }
-
- /* TODO: Handle frame compression */
-
- /*
- * Enable interrupts for this tx queue
- * in the secondary interrupt mask registers
- */
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
-
- if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
- AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
-
- /* Update secondary interrupt mask registers */
-
- /* Filter out inactive queues */
- ah->ah_txq_imr_txok &= ah->ah_txq_status;
- ah->ah_txq_imr_txerr &= ah->ah_txq_status;
- ah->ah_txq_imr_txurn &= ah->ah_txq_status;
- ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
- ah->ah_txq_imr_txeol &= ah->ah_txq_status;
- ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
- ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
- ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
- ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
-
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
- AR5K_SIMR0_QCU_TXOK) |
- AR5K_REG_SM(ah->ah_txq_imr_txdesc,
- AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
- AR5K_SIMR1_QCU_TXERR) |
- AR5K_REG_SM(ah->ah_txq_imr_txeol,
- AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
- /* Update simr2 but don't overwrite rest simr2 settings */
- AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
- AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
- AR5K_REG_SM(ah->ah_txq_imr_txurn,
- AR5K_SIMR2_QCU_TXURN));
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
- AR5K_SIMR3_QCBRORN) |
- AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
- AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
- AR5K_SIMR4_QTRIG), AR5K_SIMR4);
- /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
- ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
- AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
- /* No queue has TXNOFRM enabled, disable the interrupt
- * by setting AR5K_TXNOFRM to zero */
- if (ah->ah_txq_imr_nofrm == 0)
- ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
-
- /* Set QCU mask for this DCU to save power */
- AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
}
+ /* TODO: Handle frame compression */
+
+ /*
+ * Enable interrupts for this tx queue
+ * in the secondary interrupt mask registers
+ */
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
+
+ if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
+ AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
+
+ /* Update secondary interrupt mask registers */
+
+ /* Filter out inactive queues */
+ ah->ah_txq_imr_txok &= ah->ah_txq_status;
+ ah->ah_txq_imr_txerr &= ah->ah_txq_status;
+ ah->ah_txq_imr_txurn &= ah->ah_txq_status;
+ ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
+ ah->ah_txq_imr_txeol &= ah->ah_txq_status;
+ ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
+ ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
+ ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
+ ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
+ AR5K_SIMR0_QCU_TXOK) |
+ AR5K_REG_SM(ah->ah_txq_imr_txdesc,
+ AR5K_SIMR0_QCU_TXDESC),
+ AR5K_SIMR0);
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
+ AR5K_SIMR1_QCU_TXERR) |
+ AR5K_REG_SM(ah->ah_txq_imr_txeol,
+ AR5K_SIMR1_QCU_TXEOL),
+ AR5K_SIMR1);
+
+ /* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
+ AR5K_REG_SM(ah->ah_txq_imr_txurn,
+ AR5K_SIMR2_QCU_TXURN));
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
+ AR5K_SIMR3_QCBRORN) |
+ AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
+ AR5K_SIMR3_QCBRURN),
+ AR5K_SIMR3);
+
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
+ AR5K_SIMR4_QTRIG), AR5K_SIMR4);
+
+ /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
+ ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
+ AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
+
+ /* If no queue has TXNOFRM enabled, disable the interrupt
+ * by setting AR5K_TXNOFRM to zero */
+ if (ah->ah_txq_imr_nofrm == 0)
+ ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
+
+ /* Set QCU mask for this DCU to save power */
+ AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
+
return 0;
}
-/*
- * Get slot time from DCU
+
+/**************************\
+* Global QCU/DCU functions *
+\**************************/
+
+/**
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
+ *
+ * Sets the global IFS intervals on DCU (also works on AR5210) for
+ * the given slot time and the current bwmode.
*/
-unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
+int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
- unsigned int slot_time_clock;
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rate *rate;
+ u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+ u32 rate_flags, i;
- ATH5K_TRACE(ah->ah_sc);
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
+ return -EINVAL;
- if (ah->ah_version == AR5K_AR5210)
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
+ sifs = ath5k_hw_get_default_sifs(ah);
+ sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
+
+ /* EIFS
+ * Txtime of ack at lowest rate + SIFS + DIFS
+ * (DIFS = SIFS + 2 * Slot time)
+ *
+ * Note: HAL has some predefined values for EIFS
+ * Turbo: (37 + 2 * 6)
+ * Default: (74 + 2 * 9)
+ * Half: (149 + 2 * 13)
+ * Quarter: (298 + 2 * 21)
+ *
+ * (74 + 2 * 6) for AR5210 default and turbo!
+ *
+ * According to the formula we have
+ * ack_tx_time = 25 for turbo and
+ * ack_tx_time = 42.5 * clock multiplier
+ * for default/half/quarter.
+ *
+ * This can't be right: 42 is what we would get
+ * from ath5k_hw_get_frame_dur_for_bwmode or
+ * ieee80211_generic_frame_duration for zero frame
+ * length and without SIFS!
+ *
+ * Also, we have a different lowest rate for 802.11a
+ */
+ if (channel->band == IEEE80211_BAND_5GHZ)
+ band = IEEE80211_BAND_5GHZ;
else
- slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
+ band = IEEE80211_BAND_2GHZ;
+
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
+ break;
+ default:
+ rate_flags = 0;
+ break;
+ }
+ sband = &ah->sbands[band];
+ rate = NULL;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rate = &sband->bitrates[i];
+ break;
+ }
+ if (WARN_ON(!rate))
+ return -EINVAL;
+
+ ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
+
+ /* ack_tx_time includes a SIFS already */
+ eifs = ack_tx_time + sifs + 2 * slot_time;
+ eifs_clock = ath5k_hw_htoclock(ah, eifs);
+
+ /* Set IFS settings on AR5210 */
+ if (ah->ah_version == AR5K_AR5210) {
+ u32 pifs, pifs_clock, difs, difs_clock;
+
+ /* Set slot time */
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
+
+ /* Set EIFS */
+ eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
- return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
+ /* PIFS = Slot time + SIFS */
+ pifs = slot_time + sifs;
+ pifs_clock = ath5k_hw_htoclock(ah, pifs);
+ pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
+
+ /* DIFS = SIFS + 2 * Slot time */
+ difs = sifs + 2 * slot_time;
+ difs_clock = ath5k_hw_htoclock(ah, difs);
+
+ /* Set SIFS/DIFS */
+ ath5k_hw_reg_write(ah, (difs_clock <<
+ AR5K_IFS0_DIFS_S) | sifs_clock,
+ AR5K_IFS0);
+
+ /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
+ ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
+ (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
+ AR5K_IFS1);
+
+ return 0;
+ }
+
+ /* Set IFS slot time */
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
+
+ /* Set EIFS interval */
+ ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
+
+ /* Set SIFS interval in usecs */
+ AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
+ sifs);
+
+ /* Set SIFS interval in clock cycles */
+ ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
+
+ return 0;
}
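To make the EIFS/DIFS arithmetic above concrete, a small stand-alone sketch assuming the usual 802.11a defaults (SIFS = 16 us, slot time = 9 us) and an assumed ack_tx_time of 58 us, chosen so EIFS matches the HAL default of 74 + 2 * 9 quoted in the comment; the real value comes from ath5k_hw_get_frame_duration:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed 802.11a defaults, in microseconds (illustrative only). */
		unsigned int sifs = 16;
		unsigned int slot_time = 9;
		unsigned int ack_tx_time = 58;	/* assumed, includes one SIFS */

		unsigned int pifs = sifs + slot_time;			/* 25 us */
		unsigned int difs = sifs + 2 * slot_time;		/* 34 us */
		unsigned int eifs = ack_tx_time + sifs + 2 * slot_time;/* 92 us */

		printf("PIFS=%u DIFS=%u EIFS=%u\n", pifs, difs, eifs);
		return 0;
	}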
-/*
- * Set slot time on DCU
+
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on the information in
+ * ah->ah_txq* set by the driver
*/
-int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
{
- u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+ int i, ret;
- ATH5K_TRACE(ah->ah_sc);
+ /* TODO: HW Compression support for data queues */
+ /* TODO: Burst prefetch for data queues */
- if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
- return -EINVAL;
-
- if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
+ /*
+ * Reset queues and start beacon timers at the end of the reset routine.
+ * This also sets the QCU mask on each DCU for a 1:1 QCU to DCU mapping.
+ * Note: If we want, we can map multiple QCUs to a single DCU.
+ */
+ if (ah->ah_version != AR5K_AR5210)
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
+ ret = ath5k_hw_reset_tx_queue(ah, i);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "failed to reset TX queue #%d\n", i);
+ return ret;
+ }
+ }
else
- ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
+ /* No QCU/DCU on AR5210, just set tx
+ * retry limits. We set IFS parameters
+ * in ath5k_hw_set_ifs_intervals */
+ ath5k_hw_set_tx_retry_limits(ah, 0);
+
+ /* Set the turbo flag when operating on 40MHz */
+ if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
+ AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
+ AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
+
+ /* If we didn't set IFS timings through
+ * ath5k_hw_set_coverage_class, make sure
+ * we set them here */
+ if (!ah->ah_coverage_class) {
+ unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
+ ath5k_hw_set_ifs_intervals(ah, slot_time);
+ }
return 0;
}
-