Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/tx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 89
1 file changed, 53 insertions(+), 36 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 6c5b867c353..8e9e3212fe7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -160,7 +160,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
- iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+ iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -173,9 +173,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
u32 tbl_dw =
- iwl_read_targ_mem(trans,
- trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(i));
+ iwl_trans_read_mem32(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
if (i & 0x1)
tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
@@ -237,7 +237,10 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
break;
}
- bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+ if (trans_pcie->bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -306,6 +309,9 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
return;
}
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
+ txq->q.write_ptr);
+
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
txq->q.write_ptr | (txq_id << 8));
@@ -612,7 +618,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
if (txq->q.n_bd) {
dma_free_coherent(dev, sizeof(struct iwl_tfd) *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
- memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
+ txq->q.dma_addr = 0;
}
kfree(txq->entries);
@@ -638,9 +644,11 @@ static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 a;
+ int nq = trans->cfg->base_params->num_of_queues;
int chan;
u32 reg_val;
+ int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+ SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queue are not stopped/used */
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
@@ -652,20 +660,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
WARN_ON(scd_base_addr != 0 &&
scd_base_addr != trans_pcie->scd_base_addr);
- a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
- /* reset conext data memory */
- for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- /* reset tx status memory */
- for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
- for (; a < trans_pcie->scd_base_addr +
- SCD_TRANS_TBL_OFFSET_QUEUE(
- trans->cfg->base_params->num_of_queues);
- a += 4)
- iwl_write_targ_mem(trans, a, 0);
+ /* reset context data, TX status and translation data */
+ iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_MEM_LOWER_BOUND,
+ NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
@@ -697,6 +695,29 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int txq_id;
+
+ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+ txq_id++) {
+ struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+
+ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
+ txq->q.dma_addr >> 8);
+ iwl_pcie_txq_unmap(trans, txq_id);
+ txq->q.read_ptr = 0;
+ txq->q.write_ptr = 0;
+ }
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+ trans_pcie->kw.dma >> 4);
+
+ iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
+}
+
/*
* iwl_pcie_tx_stop - Stop all Tx DMA channels
*/
@@ -905,7 +926,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (WARN_ON(txq_id == trans_pcie->cmd_queue))
return;
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
if (txq->q.read_ptr == tfd_num)
goto out;
@@ -949,7 +970,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (iwl_queue_space(&txq->q) > txq->q.low_mark)
iwl_wake_queue(trans, txq);
out:
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
/*
@@ -1002,14 +1023,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
tbl_dw_addr = trans_pcie->scd_base_addr +
SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
- tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
+ tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
- iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
+ iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
return 0;
}
@@ -1068,9 +1089,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
@@ -1101,8 +1122,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_set_inactive(trans, txq_id);
- _iwl_write_targ_mem_dwords(trans, stts_addr,
- zero_val, ARRAY_SIZE(zero_val));
+ iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+ ARRAY_SIZE(zero_val));
iwl_pcie_txq_unmap(trans, txq_id);
@@ -1350,7 +1371,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
return;
}
- spin_lock(&txq->lock);
+ spin_lock_bh(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->entries[cmd_index].cmd;
@@ -1384,7 +1405,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->flags = 0;
- spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->lock);
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
@@ -1642,10 +1663,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
- IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
- le16_to_cpu(dev_cmd->hdr.sequence));
- IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
-
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));