Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie/rx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 972
1 file changed, 564 insertions(+), 408 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 17c8e5d8268..a2698e5e062 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -76,116 +76,125 @@
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
- *   were enough free buffers and RX_STALLED is set it is cleared.
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
- * iwl_rx_queue_alloc()   Allocates rx_free
- * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
- *                        iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * iwl_rxq_alloc()            Allocates rx_free
+ * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
+ *                            iwl_pcie_rxq_restock
+ * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index.  If insufficient rx_free buffers
- *                        are available, schedules iwl_rx_replenish
+ *                        are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
- * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
+ * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
- *                        Calls iwl_rx_queue_restock to refill any empty
+ *                        Calls iwl_pcie_rxq_restock to refill any empty
 *                        slots.
 * ...
 *
 */
 
-/**
- * iwl_rx_queue_space - Return number of free slots available in queue.
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
  */
-static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
+static int iwl_rxq_space(const struct iwl_rxq *rxq)
 {
-	int s = q->read - q->write;
-	if (s <= 0)
-		s += RX_QUEUE_SIZE;
-	/* keep some buffer to not confuse full and empty queue */
-	s -= 2;
-	if (s < 0)
-		s = 0;
-	return s;
+	/* Make sure RX_QUEUE_SIZE is a power of 2 */
+	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+	/*
+	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+	 * between empty and completely full queues.
+	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+	 * defined for negative dividends.
+	 */
+	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
 }
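Editor's note: the rewritten iwl_rxq_space() above replaces the old branchy arithmetic with a single masked subtraction. The following is a minimal userspace model of that trick, with illustrative names and test values (not driver code); it compiles standalone:

/* Model of iwl_rxq_space(): for a power-of-two ring of RX_QUEUE_SIZE
 * entries, (read - write - 1) & (RX_QUEUE_SIZE - 1) is the free-slot
 * count, and the mask makes the negative intermediate wrap correctly. */
#include <assert.h>
#include <stdio.h>

#define RX_QUEUE_SIZE 256	/* must be a power of 2 */

static int rxq_space(unsigned int read, unsigned int write)
{
	/* one slot is sacrificed so "empty" (read == write) and "full"
	 * remain distinguishable */
	return (read - write - 1) & (RX_QUEUE_SIZE - 1);
}

int main(void)
{
	assert(rxq_space(0, 0) == RX_QUEUE_SIZE - 1);	/* empty ring */
	assert(rxq_space(10, 9) == 0);			/* full ring */
	assert(rxq_space(5, 250) == 10);		/* wrapped indexes */
	printf("ring-space model ok\n");
	return 0;
}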
 
-/**
- * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  */
-void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
-				   struct iwl_rx_queue *q)
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 {
-	unsigned long flags;
-	u32 reg;
-
-	spin_lock_irqsave(&q->lock, flags);
+	return cpu_to_le32((u32)(dma_addr >> 8));
+}
 
-	if (q->need_update == 0)
-		goto exit_unlock;
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+}
 
-	if (trans->cfg->base_params->shadow_reg_enable) {
-		/* shadow register enabled */
-		/* Device expects a multiple of 8 */
-		q->write_actual = (q->write & ~0x7);
-		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
-	} else {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
-
-		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
-			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
-			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(trans,
-					       "Rx queue requesting wakeup,"
-					       " GP1 = 0x%x\n", reg);
-				iwl_set_bit(trans, CSR_GP_CNTRL,
-					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-				goto exit_unlock;
-			}
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
+ */
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	u32 reg;
 
-			q->write_actual = (q->write & ~0x7);
-			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
-					   q->write_actual);
+	lockdep_assert_held(&rxq->lock);
 
-		/* Else device is assumed to be awake */
-		} else {
-			/* Device expects a multiple of 8 */
-			q->write_actual = (q->write & ~0x7);
-			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
-					   q->write_actual);
+	/*
+	 * explicitly wake up the NIC if:
+	 * 1. shadow registers aren't enabled
+	 * 2. there is a chance that the NIC is asleep
+	 */
+	if (!trans->cfg->base_params->shadow_reg_enable &&
+	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+				       reg);
+			iwl_set_bit(trans, CSR_GP_CNTRL,
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			rxq->need_update = true;
+			return;
 		}
 	}
 
-	q->need_update = 0;
-
- exit_unlock:
-	spin_unlock_irqrestore(&q->lock, flags);
+	rxq->write_actual = round_down(rxq->write, 8);
+	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
-/**
- * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 {
-	return cpu_to_le32((u32)(dma_addr >> 8));
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+	spin_lock(&rxq->lock);
+
+	if (!rxq->need_update)
+		goto exit_unlock;
+
+	iwl_pcie_rxq_inc_wr_ptr(trans);
+	rxq->need_update = false;
+
+ exit_unlock:
+	spin_unlock(&rxq->lock);
 }
 
-/**
- * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
  *
  * If there are slots in the RX queue that need to be restocked,
  * and we have free pre-allocated buffers, fill the ranks as much
@@ -195,43 +204,41 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwl_rx_queue_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct list_head *element;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
-	unsigned long flags;
 
 	/*
 	 * If the device isn't enabled - not need to try to add buffers...
 	 * This can happen when we stop the device and still have an interrupt
-	 * pending. We stop the APM before we sync the interrupts / tasklets
-	 * because we have to (see comment there). On the other hand, since
-	 * the APM is stopped, we cannot access the HW (in particular not prph).
+	 * pending. We stop the APM before we sync the interrupts because we
+	 * have to (see comment there). On the other hand, since the APM is
+	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
-	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
+	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
 		return;
 
-	spin_lock_irqsave(&rxq->lock, flags);
-	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+	spin_lock(&rxq->lock);
+	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
 		/* The overwritten rxb must be a used one */
 		rxb = rxq->queue[rxq->write];
 		BUG_ON(rxb && rxb->page);
 
 		/* Get next free Rx buffer, remove from free list */
-		element = rxq->rx_free.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
+		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma);
+		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
 	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
+	spin_unlock(&rxq->lock);
 	/* If the pre-allocated buffer pool is dropping low, schedule to
 	 * refill it */
 	if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -240,39 +247,36 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
 	if (rxq->write_actual != (rxq->write & ~0x7)) {
-		spin_lock_irqsave(&rxq->lock, flags);
-		rxq->need_update = 1;
-		spin_unlock_irqrestore(&rxq->lock, flags);
-		iwl_rx_queue_update_write_ptr(trans, rxq);
+		spin_lock(&rxq->lock);
+		iwl_pcie_rxq_inc_wr_ptr(trans);
+		spin_unlock(&rxq->lock);
 	}
 }
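Editor's note: two hardware-facing conversions in the functions above are easy to miss. The device consumes the RX write pointer only in multiples of 8, hence round_down(rxq->write, 8), and each RBD stores the buffer's DMA address shifted right by 8, which is lossless only because buffers are 256-byte aligned and at most 36 bits wide (the BUG_ON checks further down enforce this). A standalone model under assumed names, not driver code:

/* Model of the two conversions: round_down(v, 8) for the write pointer
 * and addr >> 8 for the RBD entry. */
#include <assert.h>
#include <stdint.h>

static uint32_t round_down8(uint32_t v)
{
	return v & ~(uint32_t)0x7;	/* like the kernel's round_down(v, 8) */
}

static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
	assert((dma_addr & 0xff) == 0);		/* 256-byte aligned */
	assert((dma_addr >> 36) == 0);		/* no more than 36 bits */
	return (uint32_t)(dma_addr >> 8);	/* fits in 28 bits */
}

int main(void)
{
	assert(round_down8(13) == 8);
	assert(dma_addr2rbd_ptr(0x123456700ULL) == 0x1234567);
	return 0;
}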
 
 /*
- * iwl_rx_allocate - allocate a page for each used RBD
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
  * a page must be allocated and the RBD must point to the page. This function
  * doesn't change the HW pointer but handles the list of pages that is used by
- * iwl_rx_queue_restock. The latter function will update the HW to use the newly
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct list_head *element;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
 	while (1) {
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			return;
 		}
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
 
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
@@ -301,18 +305,17 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 			return;
 		}
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		if (list_empty(&rxq->rx_used)) {
-			spin_unlock_irqrestore(&rxq->lock, flags);
+			spin_unlock(&rxq->lock);
 			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
-		element = rxq->rx_used.next;
-		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-		list_del(element);
-
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
+		spin_unlock(&rxq->lock);
 
 		BUG_ON(rxb->page);
 		rxb->page = page;
@@ -321,62 +324,253 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 			dma_map_page(trans->dev, page, 0,
 				     PAGE_SIZE << trans_pcie->rx_page_order,
 				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			rxb->page = NULL;
+			spin_lock(&rxq->lock);
+			list_add(&rxb->list, &rxq->rx_used);
+			spin_unlock(&rxq->lock);
+			__free_pages(page, trans_pcie->rx_page_order);
+			return;
+		}
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
 		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
-		spin_lock_irqsave(&rxq->lock, flags);
+		spin_lock(&rxq->lock);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 
-		spin_unlock_irqrestore(&rxq->lock, flags);
+		spin_unlock(&rxq->lock);
+	}
+}
+
+static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	int i;
+
+	lockdep_assert_held(&rxq->lock);
+
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		if (!rxq->pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
+		rxq->pool[i].page = NULL;
 	}
 }
 
 /*
- * iwl_rx_replenish - Move all used buffers from rx_used to rx_free
+ * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
  *
  * When moving to rx_free an page is allocated for the slot.
  *
- * Also restock the Rx queue via iwl_rx_queue_restock.
+ * Also restock the Rx queue via iwl_pcie_rxq_restock.
  * This is called as a scheduled work item (except for during initialization)
  */
-void iwl_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+{
+	iwl_pcie_rxq_alloc_rbs(trans, gfp);
+
+	iwl_pcie_rxq_restock(trans);
+}
+
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+{
+	struct iwl_trans_pcie *trans_pcie =
+	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+
+	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+}
+
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct device *dev = trans->dev;
+
+	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+
+	spin_lock_init(&rxq->lock);
 
-	iwl_rx_allocate(trans, GFP_KERNEL);
+	if (WARN_ON(rxq->bd || rxq->rb_stts))
+		return -EINVAL;
+
+	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
+	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+				      &rxq->bd_dma, GFP_KERNEL);
+	if (!rxq->bd)
+		goto err_bd;
+
+	/*Allocate the driver's pointer to receive buffer status */
+	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+					   &rxq->rb_stts_dma, GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err_rb_stts;
+
+	return 0;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-	iwl_rx_queue_restock(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+err_rb_stts:
+	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+			  rxq->bd, rxq->bd_dma);
+	rxq->bd_dma = 0;
+	rxq->bd = NULL;
+err_bd:
+	return -ENOMEM;
 }
 
-static void iwl_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
-	iwl_rx_allocate(trans, GFP_ATOMIC);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
 
-	iwl_rx_queue_restock(trans);
+	if (trans_pcie->rx_buf_size_8k)
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+	else
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+	/* Stop Rx DMA */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	/* reset and flush pointers */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+
+	/* Reset driver's Rx queue write index */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+			   (u32)(rxq->bd_dma >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+			   rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+	 *      the credit mechanism in 5000 HW RX FIFO
+	 * Direct rx interrupts to hosts
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+			   rb_size|
+			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
+			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+	/* W/A for interrupt coalescing bug in 7260 and 3160 */
+	if (trans->cfg->host_interrupt_operation_mode)
+		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
 }
 
-void iwl_bg_rx_replenish(struct work_struct *data)
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 {
-	struct iwl_trans_pcie *trans_pcie =
-	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+	int i;
+
+	lockdep_assert_held(&rxq->lock);
 
-	iwl_rx_replenish(trans_pcie->trans);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	rxq->free_count = 0;
+
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
-static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	int i, err;
+
+	if (!rxq->bd) {
+		err = iwl_pcie_rx_alloc(trans);
+		if (err)
+			return err;
+	}
+
+	spin_lock(&rxq->lock);
+
+	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
+	/* free all first - we might be reconfigured for a different size */
+	iwl_pcie_rxq_free_rbs(trans);
+	iwl_pcie_rx_init_rxb_lists(rxq);
+
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
+		rxq->queue[i] = NULL;
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+	spin_unlock(&rxq->lock);
+
+	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+
+	iwl_pcie_rx_hw_init(trans, rxq);
+
+	spin_lock(&rxq->lock);
+	iwl_pcie_rxq_inc_wr_ptr(trans);
+	spin_unlock(&rxq->lock);
+
+	return 0;
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+
+	/*if rxq->bd is NULL, it means that nothing has been allocated,
+	 * exit now */
+	if (!rxq->bd) {
+		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+		return;
+	}
+
+	cancel_work_sync(&trans_pcie->rx_replenish);
+
+	spin_lock(&rxq->lock);
+	iwl_pcie_rxq_free_rbs(trans);
+	spin_unlock(&rxq->lock);
+
+	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
+			  rxq->bd, rxq->bd_dma);
+	rxq->bd_dma = 0;
+	rxq->bd = NULL;
+
+	if (rxq->rb_stts)
+		dma_free_coherent(trans->dev,
+				  sizeof(struct iwl_rb_status),
+				  rxq->rb_stts, rxq->rb_stts_dma);
+	else
+		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
+	rxq->rb_stts_dma = 0;
+	rxq->rb_stts = NULL;
+}
+
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 				struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
-	unsigned long flags;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	bool page_stolen = false;
 	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
 	u32 offset = 0;
@@ -394,6 +588,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 		int index, cmd_index, err, len;
 		struct iwl_rx_cmd_buffer rxcb = {
 			._offset = offset,
+			._rx_page_order = trans_pcie->rx_page_order,
 			._page = rxb->page,
 			._page_stolen = false,
 			.truesize = max_len,
@@ -405,13 +600,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			break;
 
 		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
-			rxcb._offset,
-			trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
+			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
 			pkt->hdr.cmd);
 
-		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+		len = iwl_rx_packet_len(pkt);
 		len += sizeof(u32); /* account for status word */
-		trace_iwlwifi_dev_rx(trans->dev, pkt, len);
+		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
 
 		/* Reclaim a command buffer only if this packet is a response
 		 *   to a (driver-originated) command.
@@ -436,21 +631,16 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 		index = SEQ_TO_INDEX(sequence);
 		cmd_index = get_cmd_index(&txq->q, index);
 
-		if (reclaim) {
-			struct iwl_pcie_tx_queue_entry *ent;
-			ent = &txq->entries[cmd_index];
-			cmd = ent->copy_cmd;
-			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
-		} else {
+		if (reclaim)
+			cmd = txq->entries[cmd_index].cmd;
+		else
 			cmd = NULL;
-		}
 
 		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
 
 		if (reclaim) {
-			/* The original command isn't needed any more */
-			kfree(txq->entries[cmd_index].copy_cmd);
-			txq->entries[cmd_index].copy_cmd = NULL;
+			kfree(txq->entries[cmd_index].free_buf);
+			txq->entries[cmd_index].free_buf = NULL;
 		}
 
 		/*
@@ -464,7 +654,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			 * iwl_trans_send_cmd()
 			 * as we reclaim the driver command queue */
 			if (!rxcb._page_stolen)
-				iwl_tx_cmd_complete(trans, &rxcb, err);
+				iwl_pcie_hcmd_complete(trans, &rxcb, err);
 			else
 				IWL_WARN(trans, "Claim null rxb?\n");
 		}
@@ -482,38 +672,45 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
 	 * rx_free list for reuse later. */
-	spin_lock_irqsave(&rxq->lock, flags);
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
 				     PAGE_SIZE << trans_pcie->rx_page_order,
 				     DMA_FROM_DEVICE);
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			/*
+			 * free the page(s) as well to not break
+			 * the invariant that the items on the used
+			 * list have no page(s)
+			 */
+			__free_pages(rxb->page, trans_pcie->rx_page_order);
+			rxb->page = NULL;
+			list_add_tail(&rxb->list, &rxq->rx_used);
+		} else {
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
-	spin_unlock_irqrestore(&rxq->lock, flags);
 }
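Editor's note: both mapping sites in this diff now check dma_mapping_error() before trusting the returned bus address. Below is a minimal kernel-style sketch of that recovery pattern under assumed names (my_rx_buf and map_rx_page are hypothetical, not the driver's API); the point is that on failure the page is released, so a buffer on the used list never keeps a dangling page:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct my_rx_buf {		/* hypothetical stand-in for iwl_rx_mem_buffer */
	struct page *page;
	dma_addr_t page_dma;
};

/* Map one receive page for DMA; on failure, free the page and leave the
 * buffer pageless so it can go back on the "used" list safely. */
static int map_rx_page(struct device *dev, struct my_rx_buf *rxb,
		       struct page *page, unsigned int order)
{
	rxb->page_dma = dma_map_page(dev, page, 0, PAGE_SIZE << order,
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rxb->page_dma)) {
		__free_pages(page, order);
		rxb->page = NULL;
		return -ENOMEM;
	}
	rxb->page = page;
	return 0;
}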
 
-/**
- * iwl_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
-static void iwl_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	u32 r, i;
 	u8 fill_rx = 0;
 	u32 count = 8;
 	int total_empty;
 
+restart:
+	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode).
	 */
-	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
 	i = rxq->read;
 
 	/* Rx interrupt, but nothing sent from uCode */
@@ -536,7 +733,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 			     r, i, rxb);
-		iwl_rx_handle_rxbuf(trans, rxb);
+		iwl_pcie_rx_handle_rb(trans, rxb);
 
 		i = (i + 1) & RX_QUEUE_MASK;
 		/* If there are a lot of unused frames,
@@ -545,60 +742,200 @@ static void iwl_rx_handle(struct iwl_trans *trans)
 			count++;
 			if (count >= 8) {
 				rxq->read = i;
-				iwl_rx_replenish_now(trans);
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
 				count = 0;
+				goto restart;
 			}
 		}
 	}
 
 	/* Backtrack one entry */
 	rxq->read = i;
+	spin_unlock(&rxq->lock);
+
 	if (fill_rx)
-		iwl_rx_replenish_now(trans);
+		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
 	else
-		iwl_rx_queue_restock(trans);
+		iwl_pcie_rxq_restock(trans);
+
+	if (trans_pcie->napi.poll)
+		napi_gro_flush(&trans_pcie->napi, false);
 }
 
-/**
- * iwl_irq_handle_error - called for HW or SW error interrupt from card
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
  */
-static void iwl_irq_handle_error(struct iwl_trans *trans)
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
 	if (trans->cfg->internal_wimax_coex &&
 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
 			     APMG_PS_CTRL_VAL_RESET_REQ))) {
-		struct iwl_trans_pcie *trans_pcie =
-			IWL_TRANS_GET_PCIE_TRANS(trans);
-
-		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 		iwl_op_mode_wimax_active(trans->op_mode);
-		wake_up(&trans->wait_command_queue);
+		wake_up(&trans_pcie->wait_command_queue);
 		return;
 	}
 
-	iwl_dump_csr(trans);
+	iwl_pcie_dump_csr(trans);
 	iwl_dump_fh(trans, NULL);
 
-	iwl_op_mode_nic_error(trans->op_mode);
+	local_bh_disable();
+	/* The STATUS_FW_ERROR bit is set in this function. This must happen
+	 * before we wake up the command caller, to ensure a proper cleanup. */
+	iwl_trans_fw_error(trans);
+	local_bh_enable();
+
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	wake_up(&trans_pcie->wait_command_queue);
 }
 
-/* tasklet for iwlagn interrupt */
-void iwl_irq_tasklet(struct iwl_trans *trans)
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+	u32 inta;
+
+	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(trans, CSR_INT);
+
+	/* the thread will service interrupts and re-enable them */
+	return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT	12
+#define ICT_SIZE	(1 << ICT_SHIFT)
+#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
+
+/* interrupt handler using ict table, with this interrupt driver will
+ * stop using INTA register to get device's interrupt, reading this register
+ * is expensive, device will write interrupts in ICT dram table, increment
+ * index then will fire interrupt to driver, driver will OR all ICT table
+ * entries from current index up to table entry with 0 value. the result is
+ * the interrupt we need to service, driver will set the entries back to 0 and
+ * set index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 inta;
+	u32 val = 0;
+	u32 read;
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+	if (!read)
+		return 0;
+
+	/*
+	 * Collect all entries up to the first 0, starting from ict_index;
+	 * note we already read at ict_index.
+	 */
+	do {
+		val |= read;
+		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+			      trans_pcie->ict_index, read);
+		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+		trans_pcie->ict_index =
+			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
+		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+					   read);
+	} while (read);
+
+	/* We should not get this value, just ignore it. */
+	if (val == 0xffffffff)
+		val = 0;
+
+	/*
+	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+	 * (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
+	 */
+	if (val & 0xC0000)
+		val |= 0x8000;
+
+	inta = (0xff & val) | ((0xff00 & val) << 16);
+	return inta;
+}
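Editor's note: a standalone userspace model of the ICT drain loop in iwl_pcie_int_cause_ict() above, assuming the behavior the comment describes: entries are ORed together until the first zero, each one is written back as zero for the device, the index wraps with a power-of-two mask, and the final line models the documented reassembly into CSR_INT layout. Names and test values are illustrative:

/* Model of the ICT drain: OR entries from ict_index until a zero entry. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ICT_COUNT 1024			/* 4096-byte table / sizeof(u32) */

static uint32_t ict_tbl[ICT_COUNT];
static unsigned int ict_index;

static uint32_t int_cause_ict(void)
{
	uint32_t val = 0, read = ict_tbl[ict_index];

	if (!read)
		return 0;			/* nothing to service */

	do {
		val |= read;
		ict_tbl[ict_index] = 0;		/* hand the slot back */
		ict_index = (ict_index + 1) & (ICT_COUNT - 1);
		read = ict_tbl[ict_index];
	} while (read);

	if (val & 0xC0000)			/* Rx-bit h/w workaround */
		val |= 0x8000;

	/* low byte stays, second byte moves to the high word */
	return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
	ict_tbl[0] = 0x80;
	ict_tbl[1] = 0x8000;
	assert(int_cause_ict() == 0x80000080);
	assert(ict_index == 2);
	printf("ICT drain model ok\n");
	return 0;
}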
 
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+{
+	struct iwl_trans *trans = dev_id;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta = 0;
 	u32 handled = 0;
-	unsigned long flags;
-	u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-	u32 inta_mask;
-#endif
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+	spin_lock(&trans_pcie->irq_lock);
+
+	/* dram interrupt table not set yet,
+	 * use legacy interrupt.
+	 */
+	if (likely(trans_pcie->use_ict))
+		inta = iwl_pcie_int_cause_ict(trans);
+	else
+		inta = iwl_pcie_int_cause_non_ict(trans);
+
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		IWL_DEBUG_ISR(trans,
+			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+			      inta, trans_pcie->inta_mask,
+			      iwl_read32(trans, CSR_INT_MASK),
+			      iwl_read32(trans, CSR_FH_INT_STATUS));
+		if (inta & (~trans_pcie->inta_mask))
+			IWL_DEBUG_ISR(trans,
+				      "We got a masked interrupt (0x%08x)\n",
+				      inta & (~trans_pcie->inta_mask));
+	}
+
+	inta &= trans_pcie->inta_mask;
+
+	/*
+	 * Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC.
+	 */
+	if (unlikely(!inta)) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		/*
+		 * Re-enable interrupts here since we don't
+		 * have anything to service
+		 */
+		if (test_bit(STATUS_INT_ENABLED, &trans->status))
+			iwl_enable_interrupts(trans);
+		spin_unlock(&trans_pcie->irq_lock);
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+		return IRQ_NONE;
+	}
+
+	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/*
+		 * Hardware disappeared. It might have
+		 * already raised an interrupt.
+		 */
+		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		spin_unlock(&trans_pcie->irq_lock);
+		goto out;
+	}
 
 	/* Ack/clear/reset pending uCode interrupts.
 	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -611,24 +948,13 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 	 * hardware bugs here by ACKing all the possible interrupts so that
 	 * interrupt coalescing can still be achieved.
 	 */
-	iwl_write32(trans, CSR_INT,
-		    trans_pcie->inta | ~trans_pcie->inta_mask);
+	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
 
-	inta = trans_pcie->inta;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (iwl_have_debug_level(IWL_DL_ISR)) {
-		/* just for debug */
-		inta_mask = iwl_read32(trans, CSR_INT_MASK);
+	if (iwl_have_debug_level(IWL_DL_ISR))
 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
-			      inta, inta_mask);
-	}
-#endif
+			      inta, iwl_read32(trans, CSR_INT_MASK));
 
-	/* saved interrupt in inta variable now we can reset trans_pcie->inta */
-	trans_pcie->inta = 0;
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 
 	/* Now service all interrupt bits discovered above. */
 	if (inta & CSR_INT_BIT_HW_ERR) {
@@ -638,19 +964,18 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 		iwl_disable_interrupts(trans);
 
 		isr_stats->hw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 
 		handled |= CSR_INT_BIT_HW_ERR;
 
-		return;
+		goto out;
 	}
 
-#ifdef CONFIG_IWLWIFI_DEBUG
 	if (iwl_have_debug_level(IWL_DL_ISR)) {
 		/* NIC fires this, but we don't use it, redundant with WAKEUP */
 		if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
-				      "the frame/frames.\n");
+			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
 			isr_stats->sch++;
 		}
 
@@ -660,7 +985,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 			isr_stats->alive++;
 		}
 	}
-#endif
+
 	/* Safely ignore these bits for debug checks below */
 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
 
@@ -674,7 +999,17 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 
 		isr_stats->rfkill++;
 
-		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+		if (hw_rfkill) {
+			set_bit(STATUS_RFKILL, &trans->status);
+			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+					       &trans->status))
+				IWL_DEBUG_RF_KILL(trans,
+						  "Rfkill while SYNC HCMD in flight\n");
+			wake_up(&trans_pcie->wait_command_queue);
+		} else {
+			clear_bit(STATUS_RFKILL, &trans->status);
+		}
 
 		handled |= CSR_INT_BIT_RF_KILL;
 	}
@@ -691,17 +1026,15 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 		IWL_ERR(trans, "Microcode SW error detected. "
 			" Restarting 0x%X.\n", inta);
 
 		isr_stats->sw++;
-		iwl_irq_handle_error(trans);
+		iwl_pcie_irq_handle_error(trans);
 		handled |= CSR_INT_BIT_SW_ERR;
 	}
 
 	/* uCode wakes up after power-down sleep */
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
-		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-			iwl_txq_update_write_ptr(trans,
-						 &trans_pcie->txq[i]);
+		iwl_pcie_rxq_check_wrptr(trans);
+		iwl_pcie_txq_check_wrptrs(trans);
 
 		isr_stats->wakeup++;
 
@@ -739,8 +1072,6 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
 				   CSR_INT_PERIODIC_DIS);
 
-		iwl_rx_handle(trans);
-
 		/*
 		 * Enable periodic interrupt in 8 msec only if we received
 		 * real RX interrupt (instead of just periodic int), to catch
@@ -753,6 +1084,10 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 				   CSR_INT_PERIODIC_ENA);
 
 		isr_stats->rx++;
+
+		local_bh_disable();
+		iwl_pcie_rx_handle(trans);
+		local_bh_enable();
 	}
 
 	/* This "Tx" DMA channel is used only for loading uCode */
@@ -778,11 +1113,15 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
 
 	/* Re-enable all interrupts */
 	/* only Re-enable if disabled by irq */
-	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
+	if (test_bit(STATUS_INT_ENABLED, &trans->status))
 		iwl_enable_interrupts(trans);
 	/* Re-enable RF_KILL if it occurred */
 	else if (handled & CSR_INT_BIT_RF_KILL)
 		iwl_enable_rfkill_int(trans);
+
+out:
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+	return IRQ_HANDLED;
 }
 
 /******************************************************************************
  *
@@ -791,13 +1130,8 @@
  * ICT functions
  *
 ******************************************************************************/
 
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT	12
-#define ICT_SIZE	(1 << ICT_SHIFT)
-#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
-
 /* Free dram table */
-void iwl_free_isr_ict(struct iwl_trans *trans)
+void iwl_pcie_free_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
@@ -810,18 +1144,17 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
 	}
 }
 
-
 /*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
-int iwl_alloc_isr_ict(struct iwl_trans *trans)
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->ict_tbl =
-		dma_alloc_coherent(trans->dev, ICT_SIZE,
+		dma_zalloc_coherent(trans->dev, ICT_SIZE,
 				    &trans_pcie->ict_tbl_dma, GFP_KERNEL);
 	if (!trans_pcie->ict_tbl)
@@ -829,37 +1162,29 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
 
 	/* just an API sanity check ... it is guaranteed to be aligned */
 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
-		iwl_free_isr_ict(trans);
+		iwl_pcie_free_ict(trans);
 		return -EINVAL;
 	}
 
-	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
-		      (unsigned long long)trans_pcie->ict_tbl_dma);
-
-	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
+	IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+		      (unsigned long long)trans_pcie->ict_tbl_dma,
+		      trans_pcie->ict_tbl);
 
-	/* reset table and index to all 0 */
-	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
-	trans_pcie->ict_index = 0;
-
-	/* add periodic RX interrupt */
-	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
 	return 0;
 }
 
 /* Device is going up inform it about using ICT interrupt table,
  * also we need to tell the driver to start using ICT interrupt.
  */
-void iwl_reset_ict(struct iwl_trans *trans)
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 val;
-	unsigned long flags;
 
 	if (!trans_pcie->ict_tbl)
 		return;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
 
 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -876,201 +1201,32 @@ void iwl_reset_ict(struct iwl_trans *trans)
 	trans_pcie->ict_index = 0;
 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
 	iwl_enable_interrupts(trans);
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
 /* Device is going down disable ict interrupt usage */
-void iwl_disable_ict(struct iwl_trans *trans)
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
 
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+	spin_lock(&trans_pcie->irq_lock);
 	trans_pcie->use_ict = false;
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-}
-
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_isr(int irq, void *data)
-{
-	struct iwl_trans *trans = data;
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
-	u32 inta_fh;
-#endif
-
-	lockdep_assert_held(&trans_pcie->irq_lock);
-
-	trace_iwlwifi_dev_irq(trans->dev);
-
-	/* Disable (but don't clear!) interrupts here to avoid
-	 * back-to-back ISRs and sporadic interrupts from our NIC.
-	 * If we have something to service, the tasklet will re-enable ints.
-	 * If we *don't* have something, we'll re-enable before leaving here. */
-	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
-	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
-	/* Discover which interrupts are active/pending */
-	inta = iwl_read32(trans, CSR_INT);
-
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	if (!inta) {
-		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-		goto none;
-	}
-
-	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
-		/* Hardware disappeared. It might have already raised
-		 * an interrupt */
-		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-		return IRQ_HANDLED;
-	}
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (iwl_have_debug_level(IWL_DL_ISR)) {
-		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
-		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
-			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
-	}
-#endif
-
-	trans_pcie->inta |= inta;
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
-	if (likely(inta))
-		tasklet_schedule(&trans_pcie->irq_tasklet);
-	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-		 !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-
-none:
-	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if disabled by irq and no schedules tasklet. */
-	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-	    !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-
-	return IRQ_NONE;
+	spin_unlock(&trans_pcie->irq_lock);
 }
 
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
 {
 	struct iwl_trans *trans = data;
-	struct iwl_trans_pcie *trans_pcie;
-	u32 inta, inta_mask;
-	u32 val = 0;
-	u32 read;
-	unsigned long flags;
 
 	if (!trans)
 		return IRQ_NONE;
 
-	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
-	/* dram interrupt table not set yet,
-	 * use legacy interrupt.
-	 */
-	if (unlikely(!trans_pcie->use_ict)) {
-		irqreturn_t ret = iwl_isr(irq, data);
-		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-		return ret;
-	}
-
-	trace_iwlwifi_dev_irq(trans->dev);
-
 	/* Disable (but don't clear!) interrupts here to avoid
 	 * back-to-back ISRs and sporadic interrupts from our NIC.
 	 * If we have something to service, the tasklet will re-enable ints.
 	 * If we *don't* have something, we'll re-enable before leaving here. */
-	inta_mask = iwl_read32(trans, CSR_INT_MASK);  /* just for debug */
 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
-	/* Ignore interrupt if there's nothing in NIC to service.
-	 * This may be due to IRQ shared with another device,
-	 * or due to sporadic interrupts thrown from our NIC. */
-	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
-	if (!read) {
-		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
-		goto none;
-	}
-
-	/*
-	 * Collect all entries up to the first 0, starting from ict_index;
-	 * note we already read at ict_index.
-	 */
-	do {
-		val |= read;
-		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
-			      trans_pcie->ict_index, read);
-		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
-		trans_pcie->ict_index =
-			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
-		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
-		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
-					   read);
-	} while (read);
-
-	/* We should not get this value, just ignore it. */
-	if (val == 0xffffffff)
-		val = 0;
-
-	/*
-	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
-	 * (bit 15 before shifting it to 31) to clear when using interrupt
-	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
-	 * so we use them to decide on the real state of the Rx bit.
-	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
-	 */
-	if (val & 0xC0000)
-		val |= 0x8000;
-
-	inta = (0xff & val) | ((0xff00 & val) << 16);
-	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
-		      inta, inta_mask, val);
-
-	inta &= trans_pcie->inta_mask;
-	trans_pcie->inta |= inta;
-
-	/* iwl_irq_tasklet() will service interrupts and re-enable them */
-	if (likely(inta))
-		tasklet_schedule(&trans_pcie->irq_tasklet);
-	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-		 !trans_pcie->inta) {
-		/* Allow interrupt if was disabled by this handler and
-		 * no tasklet was schedules, We should not enable interrupt,
-		 * tasklet will enable it.
-		 */
-		iwl_enable_interrupts(trans);
-	}
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-	return IRQ_HANDLED;
-
- none:
-	/* re-enable interrupts here since we don't have anything to service.
-	 * only Re-enable if disabled by irq.
-	 */
-	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
-	    !trans_pcie->inta)
-		iwl_enable_interrupts(trans);
-
-	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-	return IRQ_NONE;
+	return IRQ_WAKE_THREAD;
 }
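Editor's note: the net effect of this diff is to retire the tasklet-based ISR pair: iwl_pcie_isr now only masks the device and returns IRQ_WAKE_THREAD, and iwl_pcie_irq_handler runs as the threaded half. A hedged sketch of how such a pair is conventionally registered; the driver's real hookup lives outside this file, and my_request_irq plus the name string are illustrative only:

#include <linux/interrupt.h>

/* Hard-IRQ/threaded-IRQ pair: the hard handler runs in interrupt
 * context and returns IRQ_WAKE_THREAD; the core then invokes the
 * thread function in process context, where the heavier RX work
 * (page allocation, BH-disabled packet handling) is acceptable. */
static int my_request_irq(unsigned int irq, struct iwl_trans *trans)
{
	return request_threaded_irq(irq, iwl_pcie_isr, iwl_pcie_irq_handler,
				    IRQF_SHARED, "iwlwifi-example", trans);
}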
