Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c       |  88
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h  |  37
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c        | 134
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c     | 555
 -rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c        | 262
 5 files changed, 732 insertions(+), 344 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3872ead7548..98950e45c7b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -66,6 +66,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/acpi.h>
 
 #include "iwl-trans.h"
 #include "iwl-drv.h"
@@ -366,18 +367,21 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
@@ -389,12 +393,92 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
 	{0}
 };
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 
+#ifdef CONFIG_ACPI
+#define SPL_METHOD		"SPLC"
+#define SPL_DOMAINTYPE_MODULE	BIT(0)
+#define SPL_DOMAINTYPE_WIFI	BIT(1)
+#define SPL_DOMAINTYPE_WIGIG	BIT(2)
+#define SPL_DOMAINTYPE_RFEM	BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+	union acpi_object *limits, *domain_type, *power_limit;
+
+	if (splx->type != ACPI_TYPE_PACKAGE ||
+	    splx->package.count != 2 ||
+	    splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    splx->package.elements[0].integer.value != 0) {
+		IWL_ERR(trans, "Unsupported splx structure\n");
+		return 0;
+	}
+
+	limits = &splx->package.elements[1];
+	if (limits->type != ACPI_TYPE_PACKAGE ||
+	    limits->package.count < 2 ||
+	    limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+		IWL_ERR(trans, "Invalid limits element\n");
+		return 0;
+	}
+
+	domain_type = &limits->package.elements[0];
+	power_limit = &limits->package.elements[1];
+	if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+		IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+		return 0;
+	}
+
+	return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+	acpi_handle pxsx_handle;
+	acpi_handle handle;
+	struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+	acpi_status status;
+
+	pxsx_handle = ACPI_HANDLE(&pdev->dev);
+	if (!pxsx_handle) {
+		IWL_DEBUG_INFO(trans,
+			       "Could not retrieve root port ACPI handle\n");
+		return;
+	}
+
+	/* Get the method's handle */
+	status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+	if (ACPI_FAILURE(status)) {
+		IWL_DEBUG_INFO(trans, "SPL method not found\n");
+		return;
+	}
+
+	/* Call SPLC with no arguments */
+	status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+	if (ACPI_FAILURE(status)) {
+		IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
+		return;
+	}
+
+	trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+	IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
+		       trans->dflt_pwr_limit);
+	kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT	0x041
 
@@ -419,6 +503,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto out_free_trans;
 	}
 
+	set_dflt_pwr_limit(iwl_trans, pdev);
+
 	/* register transport layer debugfs here */
 	ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
 	if (ret)
@@ -477,7 +563,7 @@ static int iwl_pci_resume(struct device *device)
 	iwl_enable_rfkill_int(trans);
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
-	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
 	return 0;
 }
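For readers following splx_get_pwr_limit() above, this is the SPLX package shape it accepts, restated as a standalone user-space C sketch. The helper name and sample values are illustrative only; SPL_DOMAINTYPE_WIFI matches BIT(1) from the patch.

/*
 * Expected SPLX package (anything else makes the driver treat power
 * as unlimited, i.e. return 0):
 *
 *   Package() {                // splx
 *       0,                     // Integer: revision, must be 0
 *       Package() {            // limits
 *           domain_type,       // Integer bitmask; BIT(1) = WiFi
 *           power_limit,       // Integer: default power limit
 *       },
 *   }
 */
#include <stdint.h>
#include <stdio.h>

#define SPL_DOMAINTYPE_WIFI (1u << 1)	/* matches BIT(1) in the patch */

static uint64_t pwr_limit(uint64_t domain_type, uint64_t power_limit)
{
	/* WiFi bit not set -> "WiFi power is not limited" */
	if (!(domain_type & SPL_DOMAINTYPE_WIFI))
		return 0;
	return power_limit;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)pwr_limit(0x2, 150)); /* 150 */
	printf("%llu\n", (unsigned long long)pwr_limit(0x4, 150)); /* 0 */
	return 0;
}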
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e851f26fd44..6c22b23a284 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -102,7 +102,7 @@ struct iwl_rxq {
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
-	int need_update;
+	bool need_update;
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
@@ -117,21 +117,19 @@ struct iwl_dma_ptr {
 /**
  * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
  * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
 */
-static inline int iwl_queue_inc_wrap(int index, int n_bd)
+static inline int iwl_queue_inc_wrap(int index)
 {
-	return ++index & (n_bd - 1);
+	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
 }
 
 /**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
 */
-static inline int iwl_queue_dec_wrap(int index, int n_bd)
+static inline int iwl_queue_dec_wrap(int index)
 {
-	return --index & (n_bd - 1);
+	return --index & (TFD_QUEUE_SIZE_MAX - 1);
 }
 
 struct iwl_cmd_meta {
@@ -145,13 +143,13 @@ struct iwl_cmd_meta {
 *
 * Contains common data for Rx and Tx queues.
 *
- * Note the difference between n_bd and n_window: the hardware
- * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
@@ -162,7 +160,6 @@ struct iwl_cmd_meta {
 * data is a window overlayed over the HW queue.
 */
 struct iwl_queue {
-	int n_bd;              /* number of BDs in this queue */
 	int write_ptr;       /* 1-st empty entry (index) host_w*/
 	int read_ptr;         /* last used entry (index) host_r*/
 	/* use for monitoring and recovering the stuck queue */
@@ -231,7 +228,7 @@ struct iwl_txq {
 	spinlock_t lock;
 	struct timer_list stuck_timer;
 	struct iwl_trans_pcie *trans_pcie;
-	u8 need_update;
+	bool need_update;
 	u8 active;
 	bool ampdu;
 };
@@ -270,6 +267,9 @@ struct iwl_trans_pcie {
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
 
+	struct net_device napi_dev;
+	struct napi_struct napi;
+
 	/* INT ICT Table */
 	__le32 *ict_tbl;
 	dma_addr_t ict_tbl_dma;
@@ -304,7 +304,7 @@ struct iwl_trans_pcie {
 	bool bc_table_dword;
 	u32 rx_page_order;
 
-	const char **command_names;
+	const char *const *command_names;
 
 	/* queue watchdog */
 	unsigned long wd_timeout;
@@ -362,7 +362,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      struct iwl_device_cmd *dev_cmd, int txq_id);
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
@@ -370,6 +370,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
 
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
 /*****************************************************
 * Error handling
 ******************************************************/
@@ -488,4 +495,6 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
 	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
 }
 
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
 #endif /* __iwl_trans_int_pcie_h__ */
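The reworked wrap helpers depend on TFD_QUEUE_SIZE_MAX being a power of two: masking with (size - 1) behaves like a modulo that stays well defined even when the decrement goes negative. A minimal user-space demo of the same arithmetic, with the constant copied from the driver:

#include <stdio.h>

#define TFD_QUEUE_SIZE_MAX 256	/* power of 2; tx.c enforces this with BUILD_BUG_ON */

static int inc_wrap(int index) { return ++index & (TFD_QUEUE_SIZE_MAX - 1); }
static int dec_wrap(int index) { return --index & (TFD_QUEUE_SIZE_MAX - 1); }

int main(void)
{
	printf("inc(255) = %d\n", inc_wrap(255));	/* 0: wraps to the beginning */
	printf("dec(0)   = %d\n", dec_wrap(0));		/* 255: wraps to the end */
	return 0;
}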
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08c23d497a0..a2698e5e062 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -145,48 +145,49 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 /*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
-				    struct iwl_rxq *rxq)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	u32 reg;
 
-	spin_lock(&rxq->lock);
+	lockdep_assert_held(&rxq->lock);
 
-	if (rxq->need_update == 0)
-		goto exit_unlock;
+	/*
+	 * explicitly wake up the NIC if:
+	 * 1. shadow registers aren't enabled
+	 * 2. there is a chance that the NIC is asleep
+	 */
+	if (!trans->cfg->base_params->shadow_reg_enable &&
+	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+				       reg);
+			iwl_set_bit(trans, CSR_GP_CNTRL,
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			rxq->need_update = true;
+			return;
+		}
+	}
 
-	if (trans->cfg->base_params->shadow_reg_enable) {
-		/* shadow register enabled */
-		/* Device expects a multiple of 8 */
-		rxq->write_actual = (rxq->write & ~0x7);
-		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
-	} else {
-		/* If power-saving is in use, make sure device is awake */
-		if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
-			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
-			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(trans,
-					       "Rx queue requesting wakeup,"
-					       " GP1 = 0x%x\n", reg);
-				iwl_set_bit(trans, CSR_GP_CNTRL,
-					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-				goto exit_unlock;
-			}
+	rxq->write_actual = round_down(rxq->write, 8);
+	iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+}
 
-			rxq->write_actual = (rxq->write & ~0x7);
-			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
-					   rxq->write_actual);
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
 
-		/* Else device is assumed to be awake */
-		} else {
-			/* Device expects a multiple of 8 */
-			rxq->write_actual = (rxq->write & ~0x7);
-			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
-					   rxq->write_actual);
-		}
-	}
-	rxq->need_update = 0;
+	spin_lock(&rxq->lock);
+
+	if (!rxq->need_update)
+		goto exit_unlock;
+
+	iwl_pcie_rxq_inc_wr_ptr(trans);
+	rxq->need_update = false;
 
 exit_unlock:
 	spin_unlock(&rxq->lock);
@@ -247,9 +248,8 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	 * Increment device's write pointer in multiples of 8. */
 	if (rxq->write_actual != (rxq->write & ~0x7)) {
 		spin_lock(&rxq->lock);
-		rxq->need_update = 1;
+		iwl_pcie_rxq_inc_wr_ptr(trans);
 		spin_unlock(&rxq->lock);
-		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
 	}
 }
 
@@ -373,20 +373,9 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
-	spin_lock(&trans_pcie->irq_lock);
-	iwl_pcie_rxq_restock(trans);
-	spin_unlock(&trans_pcie->irq_lock);
-}
-
-static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+	iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
 	iwl_pcie_rxq_restock(trans);
 }
@@ -396,7 +385,7 @@ static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 	struct iwl_trans_pcie *trans_pcie =
 	    container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-	iwl_pcie_rx_replenish(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
@@ -532,14 +521,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 
-	iwl_pcie_rx_replenish(trans);
+	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
-	spin_lock(&trans_pcie->irq_lock);
-	rxq->need_update = 1;
-	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_lock(&rxq->lock);
+	iwl_pcie_rxq_inc_wr_ptr(trans);
+	spin_unlock(&rxq->lock);
 
 	return 0;
 }
@@ -684,7 +672,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	/* Reuse the page if possible. For notification packets and
 	 * SKBs that fail to Rx correctly, add them back into the
 	 * rx_free list for reuse later. */
-	spin_lock(&rxq->lock);
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
@@ -705,7 +692,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
-	spin_unlock(&rxq->lock);
 }
 
 /*
@@ -720,6 +706,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 	u32 count = 8;
 	int total_empty;
 
+restart:
+	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
 	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
@@ -754,18 +742,25 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 			count++;
 			if (count >= 8) {
 				rxq->read = i;
-				iwl_pcie_rx_replenish_now(trans);
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
 				count = 0;
+				goto restart;
 			}
 		}
 	}
 
 	/* Backtrack one entry */
 	rxq->read = i;
+	spin_unlock(&rxq->lock);
+
 	if (fill_rx)
-		iwl_pcie_rx_replenish_now(trans);
+		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
 	else
 		iwl_pcie_rxq_restock(trans);
+
+	if (trans_pcie->napi.poll)
+		napi_gro_flush(&trans_pcie->napi, false);
 }
 
 /*
@@ -802,10 +797,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 
 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 inta;
 
-	lockdep_assert_held(&trans_pcie->irq_lock);
+	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
 
 	trace_iwlwifi_dev_irq(trans->dev);
 
@@ -856,7 +850,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
 				trans_pcie->ict_index, read);
 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
 		trans_pcie->ict_index =
-			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
 
 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
@@ -888,7 +882,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta = 0;
 	u32 handled = 0;
-	u32 i;
 
 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
@@ -1006,7 +999,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
 		isr_stats->rfkill++;
 
-		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 		if (hw_rfkill) {
 			set_bit(STATUS_RFKILL, &trans->status);
 			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1040,9 +1033,8 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 	/* uCode wakes up after power-down sleep */
 	if (inta & CSR_INT_BIT_WAKEUP) {
 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
-		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
-			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
+		iwl_pcie_rxq_check_wrptr(trans);
+		iwl_pcie_txq_check_wrptrs(trans);
 
 		isr_stats->wakeup++;
 
@@ -1080,8 +1072,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
 				   CSR_INT_PERIODIC_DIS);
 
-		iwl_pcie_rx_handle(trans);
-
 		/*
 		 * Enable periodic interrupt in 8 msec only if we received
 		 * real RX interrupt (instead of just periodic int), to catch
@@ -1094,6 +1084,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 				   CSR_INT_PERIODIC_ENA);
 
 		isr_stats->rx++;
+
+		local_bh_disable();
+		iwl_pcie_rx_handle(trans);
+		local_bh_enable();
 	}
 
 	/* This "Tx" DMA channel is used only for loading uCode */
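One detail worth calling out from the RX path above: the device is only ever handed a write pointer that is a multiple of 8 (write_actual), and for a power-of-two step the kernel's round_down() reduces to a mask. A small demo, assuming the power-of-two case:

#include <stdio.h>

/* same result as the kernel's round_down() when y is a power of two */
#define round_down_pow2(x, y) ((x) & ~((y) - 1))

int main(void)
{
	/* FH_RSCSR_CHNL0_WPTR expects a multiple of 8 */
	for (int write = 0; write <= 24; write += 6)
		printf("write=%2d -> write_actual=%2d\n",
		       write, round_down_pow2(write, 8));
	return 0;
}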
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f9507807b48..788085bc65d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -73,8 +73,23 @@
 #include "iwl-csr.h"
 #include "iwl-prph.h"
 #include "iwl-agn-hw.h"
+#include "iwl-fw-error-dump.h"
 #include "internal.h"
 
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+		    ((reg & 0x0000ffff) | (2 << 28)));
+	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+		    ((reg & 0x0000ffff) | (3 << 28)));
+}
+
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
 	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -132,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
 	 */
 
 	/* Disable L0S exit timer (platform NMI Work/Around) */
-	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
-		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
 
 	/*
 	 * Disable L0s without affecting L1;
@@ -203,19 +219,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
 	/*
 	 * Enable DMA clock and wait for it to stabilize.
 	 *
-	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
-	 * do not disable clocks. This preserves any hardware bits already
-	 * set by default in "CLK_CTRL_REG" after reset.
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+	 * bits do not disable clocks. This preserves any hardware
+	 * bits already set by default in "CLK_CTRL_REG" after reset.
 	 */
-	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-	udelay(20);
+	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+		iwl_write_prph(trans, APMG_CLK_EN_REG,
+			       APMG_CLK_VAL_DMA_CLK_RQT);
+		udelay(20);
 
-	/* Disable L1-Active */
-	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
-			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+		/* Disable L1-Active */
+		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-	/* Clear the interrupt in APMG if the NIC is in RFKILL */
-	iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+		/* Clear the interrupt in APMG if the NIC is in RFKILL */
+		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+			       APMG_RTC_INT_STT_RFKILL);
+	}
 
 	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
 
@@ -223,6 +243,116 @@ out:
 	return ret;
 }
 
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+	int ret;
+	u32 apmg_gp1_reg;
+	u32 apmg_xtal_cfg_reg;
+	u32 dl_cfg_reg;
+
+	/* Force XTAL ON */
+	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+	udelay(10);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is possible.
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			   25000);
+	if (WARN_ON(ret < 0)) {
+		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+		/* Release XTAL ON request */
+		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+		return;
+	}
+
+	/*
+	 * Clear "disable persistence" to avoid LP XTAL resetting when
+	 * SHRD_HW_RST is applied in S3.
+	 */
+	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+	/*
+	 * Force APMG XTAL to be active to prevent its disabling by HW
+	 * caused by APMG idle state.
+	 */
+	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+						    SHR_APMG_XTAL_CFG_REG);
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+				 apmg_xtal_cfg_reg |
+				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+	/*
+	 * Reset entire device again - do controller reset (results in
+	 * SHRD_HW_RST). Turn MAC off before proceeding.
+	 */
+	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+	udelay(10);
+
+	/* Enable LP XTAL by indirect access through CSR */
+	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+				 SHR_APMG_GP1_WF_XTAL_LP_EN |
+				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+	/* Clear delay line clock power up */
+	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+	/*
+	 * Enable persistence mode to avoid LP XTAL resetting when
+	 * SHRD_HW_RST is applied in S3.
+	 */
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/* Activates XTAL resources monitor */
+	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+				 CSR_MONITOR_XTAL_RESOURCES);
+
+	/* Release XTAL ON request */
+	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+	udelay(10);
+
+	/* Release APMG XTAL */
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+				 apmg_xtal_cfg_reg &
+				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
 static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
 	int ret = 0;
@@ -250,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
 	/* Stop device's DMA activity */
 	iwl_pcie_apm_stop_master(trans);
 
+	if (trans->cfg->lp_xtal_workaround) {
+		iwl_pcie_apm_lp_xtal_enable(trans);
+		return;
+	}
+
 	/* Reset the entire device */
 	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 
@@ -273,7 +408,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 
 	spin_unlock(&trans_pcie->irq_lock);
 
-	iwl_pcie_set_pwr(trans, false);
+	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+		iwl_pcie_set_pwr(trans, false);
 
 	iwl_op_mode_nic_config(trans->op_mode);
 
@@ -318,6 +454,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 {
 	int ret;
 	int t = 0;
+	int iter;
 
 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
 
@@ -326,18 +463,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	if (ret >= 0)
 		return 0;
 
-	/* If HW is not ready, prepare the conditions to check again */
-	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
-		    CSR_HW_IF_CONFIG_REG_PREPARE);
+	for (iter = 0; iter < 10; iter++) {
+		/* If HW is not ready, prepare the conditions to check again */
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PREPARE);
+
+		do {
+			ret = iwl_pcie_set_hw_ready(trans);
+			if (ret >= 0)
+				return 0;
 
-	do {
-		ret = iwl_pcie_set_hw_ready(trans);
-		if (ret >= 0)
-			return 0;
+			usleep_range(200, 1000);
+			t += 200;
+		} while (t < 150000);
+		msleep(25);
+	}
 
-		usleep_range(200, 1000);
-		t += 200;
-	} while (t < 150000);
+	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);
 
 	return ret;
 }
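The new prepare flow above retries the whole PREPARE handshake up to 10 times, polling hw-ready in 200 µs steps with a 25 ms pause between attempts. A standalone sketch of that control flow; the hardware accessors are stubs invented for the example, and note that t accumulates across attempts exactly as in the patch:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's hardware calls (pure assumptions). */
static int polls_until_ready = 500;	/* pretend HW becomes ready eventually */

static bool hw_is_ready(void)		{ return --polls_until_ready <= 0; }
static void request_prepare(void)	{ /* CSR_HW_IF_CONFIG_REG_PREPARE */ }
static void sleep_us(unsigned int us)	{ (void)us; }

static int prepare_card(void)
{
	int t = 0;

	if (hw_is_ready())
		return 0;

	for (int iter = 0; iter < 10; iter++) {
		request_prepare();

		do {
			if (hw_is_ready())
				return 0;
			sleep_us(200);
			t += 200;	/* accumulates across attempts, as in the patch */
		} while (t < 150000);	/* up to 150 ms of polling */

		sleep_us(25000);	/* back off 25 ms, then retry */
	}

	return -1;			/* still not ready */
}

int main(void)
{
	printf("prepare_card() = %d\n", prepare_card());
	return 0;
}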
@@ -435,78 +577,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
 	return ret;
 }
 
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+					      const struct fw_img *image,
+					      int cpu,
+					      int *first_ucode_section)
 {
 	int shift_param;
-	u32 address;
-	int ret = 0;
+	int i, ret = 0;
+	u32 last_read_idx = 0;
 
 	if (cpu == 1) {
 		shift_param = 0;
-		address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+		*first_ucode_section = 0;
 	} else {
 		shift_param = 16;
-		address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+		(*first_ucode_section)++;
 	}
 
-	/* set CPU to started */
-	iwl_trans_set_bits_mask(trans,
-				CSR_UCODE_LOAD_STATUS_ADDR,
-				CSR_CPU_STATUS_LOADING_STARTED << shift_param,
-				1);
-
-	/* set last complete descriptor number */
-	iwl_trans_set_bits_mask(trans,
-				CSR_UCODE_LOAD_STATUS_ADDR,
-				CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
-				<< shift_param,
-				1);
-
-	/* set last loaded block */
-	iwl_trans_set_bits_mask(trans,
-				CSR_UCODE_LOAD_STATUS_ADDR,
-				CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
-				<< shift_param,
-				1);
+	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+		last_read_idx = i;
+
+		if (!image->sec[i].data ||
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+			IWL_DEBUG_FW(trans,
+				     "Break since Data not valid or Empty section, sec = %d\n",
+				     i);
+			break;
+		}
+
+		if (i == (*first_ucode_section) + 1)
+			/* set CPU to started */
+			iwl_set_bits_prph(trans,
+					  CSR_UCODE_LOAD_STATUS_ADDR,
+					  LMPM_CPU_HDRS_LOADING_COMPLETED
+					  << shift_param);
+
+		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+		if (ret)
+			return ret;
+	}
 
 	/* image loading complete */
-	iwl_trans_set_bits_mask(trans,
-				CSR_UCODE_LOAD_STATUS_ADDR,
-				CSR_CPU_STATUS_LOADING_COMPLETED
-				<< shift_param,
-				1);
-
-	/* set FH_TCSR_0_REG */
-	iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
-
-	/* verify image verification started */
-	ret = iwl_poll_bit(trans, address,
-			   CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
-			   CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
-			   CSR_SECURE_TIME_OUT);
-	if (ret < 0) {
-		IWL_ERR(trans, "secure boot process didn't start\n");
-		return ret;
+	iwl_set_bits_prph(trans,
+			  CSR_UCODE_LOAD_STATUS_ADDR,
+			  LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+	*first_ucode_section = last_read_idx;
+
+	return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+				      const struct fw_img *image,
+				      int cpu,
+				      int *first_ucode_section)
+{
+	int shift_param;
+	int i, ret = 0;
+	u32 last_read_idx = 0;
+
+	if (cpu == 1) {
+		shift_param = 0;
+		*first_ucode_section = 0;
+	} else {
+		shift_param = 16;
+		(*first_ucode_section)++;
 	}
 
-	/* wait for image verification to complete */
-	ret = iwl_poll_bit(trans, address,
-			   CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
-			   CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
-			   CSR_SECURE_TIME_OUT);
+	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+		last_read_idx = i;
 
-	if (ret < 0) {
-		IWL_ERR(trans, "Time out on secure boot process\n");
-		return ret;
+		if (!image->sec[i].data ||
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+			IWL_DEBUG_FW(trans,
+				     "Break since Data not valid or Empty section, sec = %d\n",
+				     i);
+			break;
+		}
+
+		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+		if (ret)
+			return ret;
 	}
 
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+		iwl_set_bits_prph(trans,
+				  CSR_UCODE_LOAD_STATUS_ADDR,
+				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
+				   LMPM_CPU_HDRS_LOADING_COMPLETED |
+				   LMPM_CPU_UCODE_LOADING_STARTED) <<
+					shift_param);
+
+	*first_ucode_section = last_read_idx;
+
 	return 0;
 }
 
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
 				     const struct fw_img *image)
 {
-	int i, ret = 0;
+	int ret = 0;
+	int first_ucode_section;
 
 	IWL_DEBUG_FW(trans,
 		     "working with %s image\n",
@@ -518,53 +688,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
 	/* configure the ucode to be ready to get the secured image */
 	if (image->is_secure) {
 		/* set secure boot inspector addresses */
-		iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
-		iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
-		/* release CPU1 reset if secure inspector image burned in OTP */
-		iwl_write32(trans, CSR_RESET, 0);
-	}
-
-	/* load to FW the binary sections of CPU1 */
-	IWL_DEBUG_INFO(trans, "Loading CPU1\n");
-	for (i = 0;
-	     i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
-	     i++) {
-		if (!image->sec[i].data)
-			break;
-		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+		iwl_write_prph(trans,
+			       LMPM_SECURE_INSPECTOR_CODE_ADDR,
+			       LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+		iwl_write_prph(trans,
+			       LMPM_SECURE_INSPECTOR_DATA_ADDR,
+			       LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+		/* set CPU1 header address */
+		iwl_write_prph(trans,
+			       LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+			       LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+		/* load to FW the binary Secured sections of CPU1 */
+		ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+							 &first_ucode_section);
 		if (ret)
 			return ret;
-	}
 
-	/* configure the ucode to start secure process on CPU1 */
-	if (image->is_secure) {
-		/* config CPU1 to start secure protocol */
-		ret = iwl_pcie_secure_set(trans, 1);
+	} else {
+		/* load to FW the binary Non secured sections of CPU1 */
+		ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+						 &first_ucode_section);
 		if (ret)
 			return ret;
-	} else {
-		/* Remove all resets to allow NIC to operate */
-		iwl_write32(trans, CSR_RESET, 0);
 	}
 
 	if (image->is_dual_cpus) {
+		/* set CPU2 header address */
+		iwl_write_prph(trans,
+			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);
 
 		/* load to FW the binary sections of CPU2 */
-		IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
-		for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
-		     i < IWL_UCODE_SECTION_MAX; i++) {
-			if (!image->sec[i].data)
-				break;
-			ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
-			if (ret)
-				return ret;
-		}
+		if (image->is_secure)
+			ret = iwl_pcie_load_cpu_secured_sections(
+							trans, image, 2,
+							&first_ucode_section);
+		else
+			ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+							 &first_ucode_section);
+		if (ret)
+			return ret;
+	}
 
-		if (image->is_secure) {
-			/* set CPU2 for secure protocol */
-			ret = iwl_pcie_secure_set(trans, 2);
-			if (ret)
-				return ret;
+	/* release CPU reset */
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+		iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+	else
+		iwl_write32(trans, CSR_RESET, 0);
+
+	if (image->is_secure) {
+		/* wait for image verification to complete */
+		ret = iwl_poll_prph_bit(trans,
+					LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+					LMPM_SECURE_BOOT_STATUS_SUCCESS,
+					LMPM_SECURE_BOOT_STATUS_SUCCESS,
+					LMPM_SECURE_TIME_OUT);
+
+		if (ret < 0) {
+			IWL_ERR(trans, "Time out on secure boot process\n");
+			return ret;
 		}
 	}
 
@@ -591,7 +776,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 		set_bit(STATUS_RFKILL, &trans->status);
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
-	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 	if (hw_rfkill && !run_in_rfkill)
 		return -ERFKILL;
 
@@ -706,7 +891,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
 	if (hw_rfkill != was_hw_rfkill)
-		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+		iwl_trans_pcie_stop_device(trans);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -815,7 +1006,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 		set_bit(STATUS_RFKILL, &trans->status);
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
-	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
 	return 0;
 }
@@ -868,6 +1059,12 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
 	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
 
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+	WARN_ON(1);
+	return 0;
+}
+
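iwl_pcie_dummy_napi_poll() exists only because a napi_struct must carry a poll callback; the transport never schedules NAPI and uses it purely as a GRO anchor for napi_gro_flush() in the RX path. A hedged sketch of the dummy-netdev pattern with made-up names, using the netif_napi_add() signature of this kernel era:

#include <linux/netdevice.h>

static struct net_device example_napi_dev;
static struct napi_struct example_napi;

static int example_dummy_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);	/* never scheduled; required by the API */
	return 0;
}

static void example_napi_setup(void)
{
	/* a minimal netdev that is never registered, just a NAPI anchor */
	init_dummy_netdev(&example_napi_dev);
	netif_napi_add(&example_napi_dev, &example_napi,
		       example_dummy_poll, NAPI_POLL_WEIGHT);
}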
 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 				     const struct iwl_trans_config *trans_cfg)
 {
@@ -894,6 +1091,18 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 
 	trans_pcie->command_names = trans_cfg->command_names;
 	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+
+	/* Initialize NAPI here - it should be before registering to mac80211
+	 * in the opmode but after the HW struct is allocated.
+	 * As this function may be called again in some corner cases don't
+	 * do anything if NAPI was already initialized.
+	 */
+	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+		init_dummy_netdev(&trans_pcie->napi_dev);
+		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+				     &trans_pcie->napi_dev,
+				     iwl_pcie_dummy_napi_poll, 64);
+	}
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -914,6 +1123,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
 	pci_disable_device(trans_pcie->pci_dev);
 	kmem_cache_destroy(trans->dev_cmd_pool);
 
+	if (trans_pcie->napi.poll)
+		netif_napi_del(&trans_pcie->napi);
+
 	kfree(trans);
 }
 
@@ -1052,7 +1264,7 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
 
 #define IWL_FLUSH_WAIT_MS	2000
 
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq;
@@ -1065,13 +1277,31 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 
 	/* waiting for all the tx frames complete might take a while */
 	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+		u8 wr_ptr;
+
 		if (cnt == trans_pcie->cmd_queue)
 			continue;
+		if (!test_bit(cnt, trans_pcie->queue_used))
+			continue;
+		if (!(BIT(cnt) & txq_bm))
+			continue;
+
+		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
 		txq = &trans_pcie->txq[cnt];
 		q = &txq->q;
-		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
-		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+		wr_ptr = ACCESS_ONCE(q->write_ptr);
+
+		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+		       !time_after(jiffies,
+				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+			u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+
+			if (WARN_ONCE(wr_ptr != write_ptr,
+				      "WR pointer moved while flushing %d -> %d\n",
+				      wr_ptr, write_ptr))
+				return -ETIMEDOUT;
 			msleep(1);
+		}
 
 		if (q->read_ptr != q->write_ptr) {
 			IWL_ERR(trans,
@@ -1079,6 +1309,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 			ret = -ETIMEDOUT;
 			break;
 		}
+		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
 	}
 
 	if (!ret)
@@ -1113,8 +1344,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
 		IWL_ERR(trans,
 			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
 			cnt, active ? "" : "in", fifo, tbl_dw,
-			iwl_read_prph(trans,
-				      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
+			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+				(TFD_QUEUE_SIZE_MAX - 1),
 			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
 	}
 
@@ -1158,6 +1389,7 @@ static const char *get_csr_string(int cmd)
 	IWL_CMD(CSR_GIO_CHICKEN_BITS);
 	IWL_CMD(CSR_ANA_PLL_CFG);
 	IWL_CMD(CSR_HW_REV_WA_REG);
+	IWL_CMD(CSR_MONITOR_STATUS_REG);
 	IWL_CMD(CSR_DBG_HPET_MEM_REG);
 	default:
 		return "UNKNOWN";
@@ -1190,6 +1422,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
 		CSR_DRAM_INT_TBL_REG,
 		CSR_GIO_CHICKEN_BITS,
 		CSR_ANA_PLL_CFG,
+		CSR_MONITOR_STATUS_REG,
 		CSR_HW_REV_WA_REG,
 		CSR_DBG_HPET_MEM_REG
 	};
@@ -1407,16 +1640,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 {
 	struct iwl_trans *trans = file->private_data;
 	char *buf = NULL;
-	int pos = 0;
-	ssize_t ret = -EFAULT;
-
-	ret = pos = iwl_dump_fh(trans, &buf);
-	if (buf) {
-		ret = simple_read_from_buffer(user_buf,
-					      count, ppos, buf, pos);
-		kfree(buf);
-	}
+	ssize_t ret;
 
+	ret = iwl_dump_fh(trans, &buf);
+	if (ret < 0)
+		return ret;
+	if (!buf)
+		return -EINVAL;
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+	kfree(buf);
 	return ret;
 }
 
@@ -1444,6 +1676,61 @@ err:
 	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
 	return -ENOMEM;
 }
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+{
+	u32 cmdlen = 0;
+	int i;
+
+	for (i = 0; i < IWL_NUM_OF_TBS; i++)
+		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+
+	return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+				    void *buf, u32 buflen)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_fw_error_dump_data *data;
+	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_fw_error_dump_txcmd *txcmd;
+	u32 len;
+	int i, ptr;
+
+	if (!buf)
+		return sizeof(*data) +
+		       cmdq->q.n_window * (sizeof(*txcmd) +
+					   TFD_MAX_PAYLOAD_SIZE);
+
+	len = 0;
+	data = buf;
+	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+	txcmd = (void *)data->data;
+	spin_lock_bh(&cmdq->lock);
+	ptr = cmdq->q.write_ptr;
+	for (i = 0; i < cmdq->q.n_window; i++) {
+		u8 idx = get_cmd_index(&cmdq->q, ptr);
+		u32 caplen, cmdlen;
+
+		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+		if (cmdlen) {
+			len += sizeof(*txcmd) + caplen;
+			txcmd->cmdlen = cpu_to_le32(cmdlen);
+			txcmd->caplen = cpu_to_le32(caplen);
+			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+			txcmd = (void *)((u8 *)txcmd->data + caplen);
+		}
+
+		ptr = iwl_queue_dec_wrap(ptr);
+	}
+	spin_unlock_bh(&cmdq->lock);
+
+	data->len = cpu_to_le32(len);
+	return sizeof(*data) + len;
+}
 #else
 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
 					 struct dentry *dir)
@@ -1486,6 +1773,10 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
 	.release_nic_access = iwl_trans_pcie_release_nic_access,
 	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	.dump_data = iwl_trans_pcie_dump_data,
+#endif
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1563,6 +1854,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1574,8 +1869,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}
 
-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1601,8 +1894,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
@@ -1614,6 +1905,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;
 
 out_free_ict:
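iwl_trans_pcie_get_cmdlen() above sums the per-TB lengths decoded by iwl_pcie_tfd_tb_get_len() (now in internal.h): hi_n_len keeps the high DMA address bits in its low nibble and the length in the bits above it, mirroring the packing done by iwl_pcie_tfd_set_tb() in tx.c. The decode, as a user-space demo:

#include <stdint.h>
#include <stdio.h>

static uint16_t tb_get_len(uint16_t hi_n_len)     { return hi_n_len >> 4; }
static uint16_t tb_get_hi_addr(uint16_t hi_n_len) { return hi_n_len & 0xf; }

int main(void)
{
	uint16_t hi_n_len = (300 << 4) | 0x2;	/* length 300, high addr bits 0x2 */

	printf("len = %u, addr_hi = 0x%x\n",
	       tb_get_len(hi_n_len), tb_get_hi_addr(hi_n_len));
	return 0;
}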
"" : "in", fifo, tbl_dw, - iwl_read_prph(trans, - SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), + iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & + (TFD_QUEUE_SIZE_MAX - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); } for (i = q->read_ptr; i != q->write_ptr; - i = iwl_queue_inc_wrap(i, q->n_bd)) + i = iwl_queue_inc_wrap(i)) IWL_ERR(trans, "scratch %d = 0x%08x\n", i, le32_to_cpu(txq->scratchbufs[i].scratch)); - iwl_trans_fw_error(trans); + iwl_force_nmi(trans); } /* @@ -287,53 +281,64 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, /* * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware */ -void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) +static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_txq *txq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->q.id; - if (txq->need_update == 0) - return; + lockdep_assert_held(&txq->lock); - if (trans->cfg->base_params->shadow_reg_enable || - txq_id == trans_pcie->cmd_queue) { - /* shadow register enabled */ - iwl_write32(trans, HBUS_TARG_WRPTR, - txq->q.write_ptr | (txq_id << 8)); - } else { - /* if we're trying to save power */ - if (test_bit(STATUS_TPOWER_PMI, &trans->status)) { - /* wake up nic if it's powered down ... - * uCode will wake up, and interrupt us again, so next - * time we'll skip this part. */ - reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); - - if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { - IWL_DEBUG_INFO(trans, - "Tx queue %d requesting wakeup," - " GP1 = 0x%x\n", txq_id, reg); - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - return; - } + /* + * explicitly wake up the NIC if: + * 1. shadow registers aren't enabled + * 2. NIC is woken up for CMD regardless of shadow outside this function + * 3. there is a chance that the NIC is asleep + */ + if (!trans->cfg->base_params->shadow_reg_enable && + txq_id != trans_pcie->cmd_queue && + test_bit(STATUS_TPOWER_PMI, &trans->status)) { + /* + * wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. + */ + reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", + txq_id, reg); + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + txq->need_update = true; + return; + } + } + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). + */ + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr); + iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8)); +} - IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, - txq->q.write_ptr); +void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - txq->q.write_ptr | (txq_id << 8)); + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = &trans_pcie->txq[i]; - /* - * else not in power-save mode, - * uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). 
 
 /*
@@ -287,53 +281,64 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 /*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
-void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+				    struct iwl_txq *txq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 reg = 0;
 	int txq_id = txq->q.id;
 
-	if (txq->need_update == 0)
-		return;
+	lockdep_assert_held(&txq->lock);
 
-	if (trans->cfg->base_params->shadow_reg_enable ||
-	    txq_id == trans_pcie->cmd_queue) {
-		/* shadow register enabled */
-		iwl_write32(trans, HBUS_TARG_WRPTR,
-			    txq->q.write_ptr | (txq_id << 8));
-	} else {
-		/* if we're trying to save power */
-		if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
-			/* wake up nic if it's powered down ...
-			 * uCode will wake up, and interrupt us again, so next
-			 * time we'll skip this part. */
-			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
-			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(trans,
-					       "Tx queue %d requesting wakeup,"
-					       " GP1 = 0x%x\n", txq_id, reg);
-				iwl_set_bit(trans, CSR_GP_CNTRL,
-					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-				return;
-			}
+	/*
+	 * explicitly wake up the NIC if:
+	 * 1. shadow registers aren't enabled
+	 * 2. NIC is woken up for CMD regardless of shadow outside this function
+	 * 3. there is a chance that the NIC is asleep
+	 */
+	if (!trans->cfg->base_params->shadow_reg_enable &&
+	    txq_id != trans_pcie->cmd_queue &&
+	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+		/*
+		 * wake up nic if it's powered down ...
+		 * uCode will wake up, and interrupt us again, so next
+		 * time we'll skip this part.
+		 */
+		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				       txq_id, reg);
+			iwl_set_bit(trans, CSR_GP_CNTRL,
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			txq->need_update = true;
+			return;
+		}
+	}
+
+	/*
+	 * if not in power-save mode, uCode will never sleep when we're
+	 * trying to tx (during RFKILL, we're not trying to tx).
+	 */
+	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+	iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+}
 
-			IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
-				     txq->q.write_ptr);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
 
-			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-					   txq->q.write_ptr | (txq_id << 8));
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+		struct iwl_txq *txq = &trans_pcie->txq[i];
 
-			/*
-			 * else not in power-save mode,
-			 * uCode will never sleep when we're
-			 * trying to tx (during RFKILL, we're not trying to tx).
-			 */
-		} else
-			iwl_write32(trans, HBUS_TARG_WRPTR,
-				    txq->q.write_ptr | (txq_id << 8));
+		spin_lock_bh(&txq->lock);
+		if (trans_pcie->txq[i].need_update) {
+			iwl_pcie_txq_inc_wr_ptr(trans, txq);
+			trans_pcie->txq[i].need_update = false;
+		}
+		spin_unlock_bh(&txq->lock);
 	}
-	txq->need_update = 0;
 }
 
 static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
@@ -348,13 +353,6 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
 	return addr;
 }
 
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
 static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
 				       dma_addr_t addr, u16 len)
 {
@@ -414,13 +412,17 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
-	/* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+	 * idx is bounded by n_window
+	 */
 	int rd_ptr = txq->q.read_ptr;
 	int idx = get_cmd_index(&txq->q, rd_ptr);
 
 	lockdep_assert_held(&txq->lock);
 
-	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+	/* We have only q->n_window txq->entries, but we use
+	 * TFD_QUEUE_SIZE_MAX tfds
+	 */
 	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
 
 	/* free SKB */
@@ -441,7 +443,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 }
 
 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-				  dma_addr_t addr, u16 len, u8 reset)
+				  dma_addr_t addr, u16 len, bool reset)
 {
 	struct iwl_queue *q;
 	struct iwl_tfd *tfd, *tfd_tmp;
@@ -547,15 +549,14 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 {
 	int ret;
 
-	txq->need_update = 0;
+	txq->need_update = false;
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
-			     txq_id);
+	ret = iwl_queue_init(&txq->q, slots_num, txq_id);
 	if (ret)
 		return ret;
 
@@ -580,15 +581,12 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 
-	if (!q->n_bd)
-		return;
-
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
 				   txq_id, q->read_ptr);
 		iwl_pcie_txq_free_tfd(trans, txq);
-		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
 	}
 	txq->active = false;
 	spin_unlock_bh(&txq->lock);
@@ -625,10 +623,12 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	}
 
 	/* De-alloc circular buffer of TFDs */
-	if (txq->q.n_bd) {
-		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
-				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+	if (txq->tfds) {
+		dma_free_coherent(dev,
+				  sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX,
+				  txq->tfds, txq->q.dma_addr);
 		txq->q.dma_addr = 0;
+		txq->tfds = NULL;
 
 		dma_free_coherent(dev,
 				  sizeof(*txq->scratchbufs) * txq->q.n_window,
@@ -685,7 +685,8 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	/* The chain extension of the SCD doesn't work well. This feature is
 	 * enabled by default by the HW, so we need to disable it manually.
 	 */
-	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+	if (trans->cfg->base_params->scd_chain_ext_wa)
+		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
 	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
 				trans_pcie->cmd_fifo);
@@ -705,8 +706,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 		   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
 	/* Enable L1-Active */
-	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
-			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
 
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
@@ -935,8 +937,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-	/* n_bd is usually 256 => n_bd - 1 = 0xff */
-	int tfd_num = ssn & (txq->q.n_bd - 1);
+	int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
 	struct iwl_queue *q = &txq->q;
 	int last_to_free;
 
@@ -960,12 +961,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	/*Since we free until index _not_ inclusive, the one before index is
 	 * the last we will free. This one must be used */
-	last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
+	last_to_free = iwl_queue_dec_wrap(tfd_num);
 
 	if (!iwl_queue_used(q, last_to_free)) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, last_to_free, q->n_bd,
+			__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
 			q->write_ptr, q->read_ptr);
 		goto out;
 	}
@@ -975,7 +976,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 	for (;
 	     q->read_ptr != tfd_num;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
 
 		if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
 			continue;
@@ -1014,25 +1015,26 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 
 	lockdep_assert_held(&txq->lock);
 
-	if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
+	if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(q, idx))) {
 		IWL_ERR(trans,
 			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
-			__func__, txq_id, idx, q->n_bd,
+			__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
			q->write_ptr, q->read_ptr);
 		return;
 	}
 
-	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+	for (idx = iwl_queue_inc_wrap(idx); q->read_ptr != idx;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr)) {
 
 		if (nfreed++ > 0) {
 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
 				idx, q->write_ptr, q->read_ptr);
-			iwl_trans_fw_error(trans);
+			iwl_force_nmi(trans);
 		}
 	}
 
-	if (q->read_ptr == q->write_ptr) {
+	if (trans->cfg->base_params->apmg_wake_up_wa &&
+	    q->read_ptr == q->write_ptr) {
 		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
 		WARN_ON(!trans_pcie->cmd_in_flight);
 		trans_pcie->cmd_in_flight = false;
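In iwl_trans_pcie_reclaim() above, the 12-bit 802.11 sequence number is mapped onto the 256-entry ring simply by masking, and entries are then freed up to but not including that index. The mapping, for reference:

#include <stdio.h>

#define TFD_QUEUE_SIZE_MAX 256

int main(void)
{
	/* SSNs run 0..4095; the ring index repeats every 256 frames */
	for (int ssn = 250; ssn < 260; ssn++)
		printf("ssn %4d -> tfd_num %3d\n",
		       ssn, ssn & (TFD_QUEUE_SIZE_MAX - 1));
	return 0;
}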
@@ -1313,28 +1315,39 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	cmd_pos = offsetof(struct iwl_device_cmd, payload);
 	copy_size = sizeof(out_cmd->hdr);
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-		int copy = 0;
+		int copy;
 
 		if (!cmd->len[i])
 			continue;
 
-		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
-		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
-			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
-
-			if (copy > cmd->len[i])
-				copy = cmd->len[i];
-		}
-
 		/* copy everything if not nocopy/dup */
 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
-					   IWL_HCMD_DFL_DUP)))
+					   IWL_HCMD_DFL_DUP))) {
 			copy = cmd->len[i];
 
-		if (copy) {
 			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
 			cmd_pos += copy;
 			copy_size += copy;
+			continue;
+		}
+
+		/*
+		 * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied
+		 * in total (for the scratchbuf handling), but copy up to what
+		 * we can fit into the payload for debug dump purposes.
+		 */
+		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+		cmd_pos += copy;
+
+		/* However, treat copy_size the proper way, we need it below */
+		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
+			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
+
+			if (copy > cmd->len[i])
+				copy = cmd->len[i];
+			copy_size += copy;
 		}
 	}
 
@@ -1349,7 +1362,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
 	iwl_pcie_txq_build_tfd(trans, txq,
 			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
-			       scratch_size, 1);
+			       scratch_size, true);
 
 	/* map first command fragment, if any remains */
 	if (copy_size > scratch_size) {
@@ -1365,7 +1378,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		}
 
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
-				       copy_size - scratch_size, 0);
+				       copy_size - scratch_size, false);
 	}
 
 	/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1388,7 +1401,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			goto out;
 		}
 
-		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 
 	out_meta->flags = cmd->flags;
@@ -1396,8 +1409,6 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		kfree(txq->entries[idx].free_buf);
 	txq->entries[idx].free_buf = dup_buf;
 
-	txq->need_update = 1;
-
 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
 	/* start timer if queue currently empty */
@@ -1409,9 +1420,11 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	/*
 	 * wake up the NIC to make sure that the firmware will see the host
 	 * command - we will let the NIC sleep once all the host commands
-	 * returned.
+	 * returned. This needs to be done only on NICs that have
+	 * apmg_wake_up_wa set.
 	 */
-	if (!trans_pcie->cmd_in_flight) {
+	if (trans->cfg->base_params->apmg_wake_up_wa &&
+	    !trans_pcie->cmd_in_flight) {
 		trans_pcie->cmd_in_flight = true;
 		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1431,7 +1444,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	/* Increment and update queue's write index */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1587,6 +1600,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 			       get_cmd_string(trans_pcie, cmd->id));
 		ret = -ETIMEDOUT;
 
+		iwl_force_nmi(trans);
 		iwl_trans_fw_error(trans);
 
 		goto cancel;
@@ -1664,7 +1678,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
 	void *tb1_addr;
 	u16 len, tb1_len, tb2_len;
-	u8 wait_write_ptr = 0;
+	bool wait_write_ptr;
 	__le16 fc = hdr->frame_control;
 	u8 hdr_len = ieee80211_hdrlen(fc);
 	u16 wifi_seq;
@@ -1725,7 +1739,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
 	       IWL_HCMD_SCRATCHBUF_SIZE);
 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
-			       IWL_HCMD_SCRATCHBUF_SIZE, 1);
+			       IWL_HCMD_SCRATCHBUF_SIZE, true);
 
 	/* there must be data left over for TB1 or this code must be changed */
 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
@@ -1735,7 +1749,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
 		goto out_err;
-	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
 
 	/*
 	 * Set up TFD's third entry to point directly to remainder
@@ -1751,7 +1765,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 					   &txq->tfds[q->write_ptr]);
 			goto out_err;
 		}
-		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
+		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 
 	/* Set up entry for this TFD in Tx byte-count array */
@@ -1765,12 +1779,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
 				  skb->data + hdr_len, tb2_len);
 
-	if (!ieee80211_has_morefrags(fc)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
+	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
 	if (txq->need_update && q->read_ptr == q->write_ptr &&
@@ -1778,22 +1787,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
 
 	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_pcie_txq_inc_wr_ptr(trans, txq);
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr);
+	if (!wait_write_ptr)
+		iwl_pcie_txq_inc_wr_ptr(trans, txq);
 
 	/*
 	 * At this point the frame is "transmitted" successfully
-	 * and we will get a TX status notification eventually,
-	 * regardless of the value of ret. "ret" only indicates
-	 * whether or not we should update the write pointer.
+	 * and we will get a TX status notification eventually.
 	 */
 	if (iwl_queue_space(q) < q->high_mark) {
-		if (wait_write_ptr) {
-			txq->need_update = 1;
+		if (wait_write_ptr)
 			iwl_pcie_txq_inc_wr_ptr(trans, txq);
-		} else {
+		else
 			iwl_stop_queue(trans, txq);
-		}
 	}
 	spin_unlock(&txq->lock);
 	return 0;
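To summarize the wait_write_ptr rework at the end of iwl_trans_pcie_tx(): the write pointer always advances, but the doorbell is deferred while more fragments are pending, unless the queue crosses its high-water mark. A toy model of just that decision (function and variable names are invented):

#include <stdbool.h>
#include <stdio.h>

static void tx_tail(bool morefrags, bool below_high_mark,
		    bool *ring_doorbell, bool *stop_queue)
{
	bool wait_write_ptr = morefrags;

	*ring_doorbell = !wait_write_ptr;
	*stop_queue = false;

	if (!below_high_mark) {			/* iwl_queue_space(q) < q->high_mark */
		if (wait_write_ptr)
			*ring_doorbell = true;	/* flush now, can't keep waiting */
		else
			*stop_queue = true;
	}
}

int main(void)
{
	bool ring, stop;

	tx_tail(true, true, &ring, &stop);
	printf("morefrags, space ok:   ring=%d stop=%d\n", ring, stop);
	tx_tail(true, false, &ring, &stop);
	printf("morefrags, queue full: ring=%d stop=%d\n", ring, stop);
	return 0;
}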
