Diffstat (limited to 'drivers/net/wireless/ath/ath10k/pci.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 1818
1 file changed, 1072 insertions(+), 746 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index e2f9ef50b1b..d0004d59c97 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/bitops.h> #include "core.h" #include "debug.h" @@ -32,15 +33,37 @@ #include "ce.h" #include "pci.h" -static unsigned int ath10k_target_ps; -module_param(ath10k_target_ps, uint, 0644); -MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option"); +enum ath10k_pci_irq_mode { + ATH10K_PCI_IRQ_AUTO = 0, + ATH10K_PCI_IRQ_LEGACY = 1, + ATH10K_PCI_IRQ_MSI = 2, +}; + +enum ath10k_pci_reset_mode { + ATH10K_PCI_RESET_AUTO = 0, + ATH10K_PCI_RESET_WARM_ONLY = 1, +}; + +static unsigned int ath10k_pci_target_ps; +static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; +static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO; + +module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644); +MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option"); + +module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); +MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); + +module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644); +MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); + +/* how long wait to wait for target to initialise, in ms */ +#define ATH10K_PCI_TARGET_WAIT 3000 +#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 -#define QCA988X_1_0_DEVICE_ID (0xabcd) #define QCA988X_2_0_DEVICE_ID (0x003c) static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { - { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ {0} }; @@ -48,58 +71,245 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = { static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, u32 *data); -static void ath10k_pci_process_ce(struct ath10k *ar); static int ath10k_pci_post_rx(struct ath10k *ar); -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, +static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, int num); -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info); -static void ath10k_pci_stop_ce(struct ath10k *ar); -static void ath10k_pci_device_reset(struct ath10k *ar); -static int ath10k_pci_reset_target(struct ath10k *ar); -static int ath10k_pci_start_intr(struct ath10k *ar); -static void ath10k_pci_stop_intr(struct ath10k *ar); +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); +static int ath10k_pci_cold_reset(struct ath10k *ar); +static int ath10k_pci_warm_reset(struct ath10k *ar); +static int ath10k_pci_wait_for_target_init(struct ath10k *ar); +static int ath10k_pci_init_irq(struct ath10k *ar); +static int ath10k_pci_deinit_irq(struct ath10k *ar); +static int ath10k_pci_request_irq(struct ath10k *ar); +static void ath10k_pci_free_irq(struct ath10k *ar); +static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, + struct ath10k_ce_pipe *rx_pipe, + struct bmi_xfer *xfer); static const struct ce_attr host_ce_config_wlan[] = { - /* host->target HTC control and raw streams */ - { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,}, - /* could be moved to share CE3 */ - /* target->host HTT + HTC control */ - { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,}, - /* target->host WMI */ - { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,}, - /* 
host->target WMI */ - { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,}, - /* host->target HTT */ - { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0, - CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,}, - /* unused */ - { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, - /* Target autonomous hif_memcpy */ - { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,}, - /* ce_diag, the Diagnostic Window */ - { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,}, + /* CE0: host->target HTC control and raw streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 32, + }, + + /* CE3: host->target WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* CE5: unused */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 2, + .src_sz_max = DIAG_TRANSFER_LIMIT, + .dest_nentries = 2, + }, }; /* Target firmware's Copy Engine configuration. */ static const struct ce_pipe_config target_ce_config_wlan[] = { - /* host->target HTC control and raw streams */ - { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,}, - /* target->host HTT + HTC control */ - { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,}, - /* target->host WMI */ - { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* host->target WMI */ - { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* host->target HTT */ - { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,}, + /* CE0: host->target HTC control and raw streams */ + { + .pipenum = 0, + .pipedir = PIPEDIR_OUT, + .nentries = 32, + .nbytes_max = 256, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE1: target->host HTT + HTC control */ + { + .pipenum = 1, + .pipedir = PIPEDIR_IN, + .nentries = 32, + .nbytes_max = 512, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE2: target->host WMI */ + { + .pipenum = 2, + .pipedir = PIPEDIR_IN, + .nentries = 32, + .nbytes_max = 2048, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE3: host->target WMI */ + { + .pipenum = 3, + .pipedir = PIPEDIR_OUT, + .nentries = 32, + .nbytes_max = 2048, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE4: host->target HTT */ + { + .pipenum = 4, + .pipedir = PIPEDIR_OUT, + .nentries = 256, + .nbytes_max = 256, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + /* NB: 50% of src nentries, since tx has 2 frags */ - /* unused */ - { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,}, - /* Reserved for target autonomous hif_memcpy */ - { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,}, + + /* CE5: unused */ + { + .pipenum = 5, + .pipedir = PIPEDIR_OUT, + .nentries = 32, + .nbytes_max = 2048, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + + /* CE6: Reserved for target autonomous hif_memcpy */ + { + .pipenum = 6, + .pipedir = 
PIPEDIR_INOUT, + .nentries = 32, + .nbytes_max = 4096, + .flags = CE_ATTR_FLAGS, + .reserved = 0, + }, + /* CE7 used only by Host */ }; +static bool ath10k_pci_irq_pending(struct ath10k *ar) +{ + u32 cause; + + /* Check if the shared legacy irq is for us */ + cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CAUSE_ADDRESS); + if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) + return true; + + return false; +} + +static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) +{ + /* IMPORTANT: INTR_CLR register has to be set after + * INTR_ENABLE is set to 0, otherwise interrupt can not be + * really cleared. */ + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, + 0); + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + + /* IMPORTANT: this extra read transaction is required to + * flush the posted write buffer. */ + (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS); +} + +static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) +{ + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); + + /* IMPORTANT: this extra read transaction is required to + * flush the posted write buffer. */ + (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS); +} + +static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (ar_pci->num_msi_intrs == 0) { + if (!ath10k_pci_irq_pending(ar)) + return IRQ_NONE; + + ath10k_pci_disable_and_clear_legacy_irq(ar); + } + + tasklet_schedule(&ar_pci->early_irq_tasklet); + + return IRQ_HANDLED; +} + +static int ath10k_pci_request_early_irq(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first + * interrupt from irq vector is triggered in all cases for FW + * indication/errors */ + ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler, + IRQF_SHARED, "ath10k_pci (early)", ar); + if (ret) { + ath10k_warn("failed to request early irq: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath10k_pci_free_early_irq(struct ath10k *ar) +{ + free_irq(ath10k_pci_priv(ar)->pdev->irq, ar); +} + /* * Diagnostic read/write access is provided for startup/config/debug usage. 
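As an illustration (not part of the patch): the early-IRQ helpers above only cover the window between target reset and ath10k_pci_hif_start(), before the regular per-CE handlers are installed. A rough sketch of the intended hand-over, using only function names introduced by this patch; the authoritative ordering is in __ath10k_pci_hif_power_up() and ath10k_pci_hif_start() further down:

	/* Sketch only -- full error unwinding omitted. */
	static int example_bring_up(struct ath10k *ar)
	{
		int ret;

		ret = ath10k_pci_init_irq(ar);		/* pick MSI-X, MSI or legacy */
		if (ret)
			return ret;

		/* catch firmware crash/indication interrupts while the target boots */
		ret = ath10k_pci_request_early_irq(ar);
		if (ret)
			return ret;

		ret = ath10k_pci_wait_for_target_init(ar);
		if (ret)
			ath10k_pci_free_early_irq(ar);

		return ret;
	}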
* Caller must guarantee proper alignment, when applicable, and single user @@ -114,7 +324,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, unsigned int completed_nbytes, orig_nbytes, remaining_bytes; unsigned int id; unsigned int flags; - struct ce_state *ce_diag; + struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; dma_addr_t ce_data_base = 0; @@ -149,9 +359,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * 2) Buffer in DMA-able space */ orig_nbytes = nbytes; - data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, - orig_nbytes, - &ce_data_base); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, + orig_nbytes, + &ce_data_base, + GFP_ATOMIC); if (!data_buf) { ret = -ENOMEM; @@ -245,12 +456,12 @@ done: __le32_to_cpu(((__le32 *)data_buf)[i]); } } else - ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", - __func__, address); + ath10k_warn("failed to read diag value at 0x%x: %d\n", + address, ret); if (data_buf) - pci_free_consistent(ar_pci->pdev, orig_nbytes, - data_buf, ce_data_base); + dma_free_coherent(ar->dev, orig_nbytes, data_buf, + ce_data_base); return ret; } @@ -278,7 +489,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, unsigned int completed_nbytes, orig_nbytes, remaining_bytes; unsigned int id; unsigned int flags; - struct ce_state *ce_diag; + struct ath10k_ce_pipe *ce_diag; void *data_buf = NULL; u32 ce_data; /* Host buffer address in CE space */ dma_addr_t ce_data_base = 0; @@ -293,9 +504,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * 2) Buffer in DMA-able space */ orig_nbytes = nbytes; - data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev, - orig_nbytes, - &ce_data_base); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, + orig_nbytes, + &ce_data_base, + GFP_ATOMIC); if (!data_buf) { ret = -ENOMEM; goto done; @@ -391,13 +603,13 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, done: if (data_buf) { - pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf, - ce_data_base); + dma_free_coherent(ar->dev, orig_nbytes, data_buf, + ce_data_base); } if (ret != 0) - ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__, - address); + ath10k_warn("failed to write diag value at 0x%x: %d\n", + address, ret); return ret; } @@ -426,18 +638,7 @@ static bool ath10k_pci_target_is_awake(struct ath10k *ar) return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); } -static void ath10k_pci_wait(struct ath10k *ar) -{ - int n = 100; - - while (n-- && !ath10k_pci_target_is_awake(ar)) - msleep(10); - - if (n < 0) - ath10k_warn("Unable to wakeup target\n"); -} - -void ath10k_do_pci_wake(struct ath10k *ar) +int ath10k_do_pci_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); void __iomem *pci_addr = ar_pci->mem; @@ -453,18 +654,19 @@ void ath10k_do_pci_wake(struct ath10k *ar) atomic_inc(&ar_pci->keep_awake_count); if (ar_pci->verified_awake) - return; + return 0; for (;;) { if (ath10k_pci_target_is_awake(ar)) { ar_pci->verified_awake = true; - break; + return 0; } if (tot_delay > PCIE_WAKE_TIMEOUT) { - ath10k_warn("target takes too long to wake up (awake count %d)\n", + ath10k_warn("target took longer %d us to wake up (awake count %d)\n", + PCIE_WAKE_TIMEOUT, atomic_read(&ar_pci->keep_awake_count)); - break; + return -ETIMEDOUT; } udelay(curr_delay); @@ -489,195 +691,146 @@ void ath10k_do_pci_sleep(struct ath10k *ar) } } -/* - * FIXME: Handle OOM properly. 
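One consequence of the hunk above, shown as a sketch rather than a quote from the diff: ath10k_do_pci_wake() now returns -ETIMEDOUT instead of only warning when the target fails to wake, so the call sites added later in this patch (e.g. ath10k_pci_warm_reset() and ath10k_pci_cold_reset() below) pair it with an error check, roughly:

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}
	/* ... register access ... */
	ath10k_do_pci_sleep(ar);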
- */ -static inline -struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info) -{ - struct ath10k_pci_compl *compl = NULL; - - spin_lock_bh(&pipe_info->pipe_lock); - if (list_empty(&pipe_info->compl_free)) { - ath10k_warn("Completion buffers are full\n"); - goto exit; - } - compl = list_first_entry(&pipe_info->compl_free, - struct ath10k_pci_compl, list); - list_del(&compl->list); -exit: - spin_unlock_bh(&pipe_info->pipe_lock); - return compl; -} - /* Called by lower (CE) layer when a send to Target completes. */ -static void ath10k_pci_ce_send_done(struct ce_state *ce_state, - void *transfer_context, - u32 ce_data, - unsigned int nbytes, - unsigned int transfer_id) +static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; - struct ath10k_pci_compl *compl; - bool process = false; + struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; + void *transfer_context; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; - do { - /* - * For the send completion of an item in sendlist, just - * increment num_sends_allowed. The upper layer callback will - * be triggered when last fragment is done with send. - */ - if (transfer_context == CE_SENDLIST_ITEM_CTXT) { - spin_lock_bh(&pipe_info->pipe_lock); - pipe_info->num_sends_allowed++; - spin_unlock_bh(&pipe_info->pipe_lock); + while (ath10k_ce_completed_send_next(ce_state, &transfer_context, + &ce_data, &nbytes, + &transfer_id) == 0) { + /* no need to call tx completion for NULL pointers */ + if (transfer_context == NULL) continue; - } - - compl = get_free_compl(pipe_info); - if (!compl) - break; - - compl->send_or_recv = HIF_CE_COMPLETE_SEND; - compl->ce_state = ce_state; - compl->pipe_info = pipe_info; - compl->transfer_context = transfer_context; - compl->nbytes = nbytes; - compl->transfer_id = transfer_id; - compl->flags = 0; - - /* - * Add the completion to the processing queue. - */ - spin_lock_bh(&ar_pci->compl_lock); - list_add_tail(&compl->list, &ar_pci->compl_process); - spin_unlock_bh(&ar_pci->compl_lock); - - process = true; - } while (ath10k_ce_completed_send_next(ce_state, - &transfer_context, - &ce_data, &nbytes, - &transfer_id) == 0); - /* - * If only some of the items within a sendlist have completed, - * don't invoke completion processing until the entire sendlist - * has been sent. - */ - if (!process) - return; - - ath10k_pci_process_ce(ar); + cb->tx_completion(ar, transfer_context, transfer_id); + } } /* Called by lower (CE) layer when data is received from the Target. 
*/ -static void ath10k_pci_ce_recv_data(struct ce_state *ce_state, - void *transfer_context, u32 ce_data, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id]; - struct ath10k_pci_compl *compl; + struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; + struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; struct sk_buff *skb; - - do { - compl = get_free_compl(pipe_info); - if (!compl) - break; - - compl->send_or_recv = HIF_CE_COMPLETE_RECV; - compl->ce_state = ce_state; - compl->pipe_info = pipe_info; - compl->transfer_context = transfer_context; - compl->nbytes = nbytes; - compl->transfer_id = transfer_id; - compl->flags = flags; + void *transfer_context; + u32 ce_data; + unsigned int nbytes, max_nbytes; + unsigned int transfer_id; + unsigned int flags; + int err; + + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &ce_data, &nbytes, &transfer_id, + &flags) == 0) { + err = ath10k_pci_post_rx_pipe(pipe_info, 1); + if (unlikely(err)) { + /* FIXME: retry */ + ath10k_warn("failed to replenish CE rx ring %d: %d\n", + pipe_info->pipe_num, err); + } skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - /* - * Add the completion to the processing queue. - */ - spin_lock_bh(&ar_pci->compl_lock); - list_add_tail(&compl->list, &ar_pci->compl_process); - spin_unlock_bh(&ar_pci->compl_lock); + max_nbytes, DMA_FROM_DEVICE); - } while (ath10k_ce_completed_recv_next(ce_state, - &transfer_context, - &ce_data, &nbytes, - &transfer_id, - &flags) == 0); + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn("rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } - ath10k_pci_process_ce(ar); + skb_put(skb, nbytes); + cb->rx_completion(ar, skb, pipe_info->pipe_num); + } } -/* Send the first nbytes bytes of the buffer */ -static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id, - unsigned int transfer_id, - unsigned int bytes, struct sk_buff *nbuf) +static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) { - struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]); - struct ce_state *ce_hdl = pipe_info->ce_hdl; - struct ce_sendlist sendlist; - unsigned int len; - u32 flags = 0; - int ret; - - memset(&sendlist, 0, sizeof(struct ce_sendlist)); + struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; + struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; + struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; + unsigned int nentries_mask; + unsigned int sw_index; + unsigned int write_index; + int err, i = 0; + + spin_lock_bh(&ar_pci->ce_lock); + + nentries_mask = src_ring->nentries_mask; + sw_index = src_ring->sw_index; + write_index = src_ring->write_index; + + if (unlikely(CE_RING_DELTA(nentries_mask, + write_index, sw_index - 1) < n_items)) { + err = -ENOBUFS; + goto err; + } - len = min(bytes, nbuf->len); - bytes -= len; + for (i = 0; i < n_items - 1; i++) { + ath10k_dbg(ATH10K_DBG_PCI, + "pci tx item %d paddr 0x%08x len %d n_items %d\n", + i, items[i].paddr, items[i].len, 
n_items); + ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", + items[i].vaddr, items[i].len); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + CE_SEND_FLAG_GATHER); + if (err) + goto err; + } - if (len & 3) - ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len); + /* `i` is equal to `n_items -1` after for() */ ath10k_dbg(ATH10K_DBG_PCI, - "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n", - nbuf->data, (unsigned long long) skb_cb->paddr, - nbuf->len, len); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, - "ath10k tx: data: ", - nbuf->data, nbuf->len); - - ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags); - - /* Make sure we have resources to handle this request */ - spin_lock_bh(&pipe_info->pipe_lock); - if (!pipe_info->num_sends_allowed) { - ath10k_warn("Pipe: %d is full\n", pipe_id); - spin_unlock_bh(&pipe_info->pipe_lock); - return -ENOSR; - } - pipe_info->num_sends_allowed--; - spin_unlock_bh(&pipe_info->pipe_lock); - - ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id); - if (ret) - ath10k_warn("CE send failed: %p\n", nbuf); + "pci tx item %d paddr 0x%08x len %d n_items %d\n", + i, items[i].paddr, items[i].len, n_items); + ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", + items[i].vaddr, items[i].len); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + 0); + if (err) + goto err; - return ret; + spin_unlock_bh(&ar_pci->ce_lock); + return 0; + +err: + for (; i > 0; i--) + __ath10k_ce_send_revert(ce_pipe); + + spin_unlock_bh(&ar_pci->ce_lock); + return err; } static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]); - int ret; - spin_lock_bh(&pipe_info->pipe_lock); - ret = pipe_info->num_sends_allowed; - spin_unlock_bh(&pipe_info->pipe_lock); + ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n"); - return ret; + return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); } static void ath10k_pci_hif_dump_area(struct ath10k *ar) @@ -691,14 +844,13 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) ath10k_err("firmware crashed!\n"); ath10k_err("hardware name %s version 0x%x\n", ar->hw_params.name, ar->target_version); - ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major, - ar->fw_version_minor, ar->fw_version_release, - ar->fw_version_build); + ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version); host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); - if (ath10k_pci_diag_read_mem(ar, host_addr, - ®_dump_area, sizeof(u32)) != 0) { - ath10k_warn("could not read hi_failure_state\n"); + ret = ath10k_pci_diag_read_mem(ar, host_addr, + ®_dump_area, sizeof(u32)); + if (ret) { + ath10k_err("failed to read FW dump area address: %d\n", ret); return; } @@ -708,7 +860,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) ®_dump_values[0], REG_DUMP_COUNT_QCA988X * sizeof(u32)); if (ret != 0) { - ath10k_err("could not dump FW Dump Area\n"); + ath10k_err("failed to read FW dump area: %d\n", ret); return; } @@ -723,12 +875,14 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) reg_dump_values[i + 2], reg_dump_values[i + 3]); - ieee80211_queue_work(ar->hw, &ar->restart_work); + queue_work(ar->workqueue, &ar->restart_work); } static void 
ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { + ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n"); + if (!force) { int resources; /* @@ -755,211 +909,54 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n"); memcpy(&ar_pci->msg_callbacks_current, callbacks, sizeof(ar_pci->msg_callbacks_current)); } -static int ath10k_pci_start_ce(struct ath10k *ar) +static int ath10k_pci_setup_ce_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_diag = ar_pci->ce_diag; const struct ce_attr *attr; - struct hif_ce_pipe_info *pipe_info; - struct ath10k_pci_compl *compl; - int i, pipe_num, completions, disable_interrupts; + struct ath10k_pci_pipe *pipe_info; + int pipe_num, disable_interrupts; - spin_lock_init(&ar_pci->compl_lock); - INIT_LIST_HEAD(&ar_pci->compl_process); - - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { pipe_info = &ar_pci->pipe_info[pipe_num]; - spin_lock_init(&pipe_info->pipe_lock); - INIT_LIST_HEAD(&pipe_info->compl_free); - /* Handle Diagnostic CE specially */ - if (pipe_info->ce_hdl == ce_diag) + if (pipe_info->ce_hdl == ar_pci->ce_diag) continue; attr = &host_ce_config_wlan[pipe_num]; - completions = 0; if (attr->src_nentries) { disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; ath10k_ce_send_cb_register(pipe_info->ce_hdl, ath10k_pci_ce_send_done, disable_interrupts); - completions += attr->src_nentries; - pipe_info->num_sends_allowed = attr->src_nentries - 1; } - if (attr->dest_nentries) { + if (attr->dest_nentries) ath10k_ce_recv_cb_register(pipe_info->ce_hdl, ath10k_pci_ce_recv_data); - completions += attr->dest_nentries; - } - - if (completions == 0) - continue; - - for (i = 0; i < completions; i++) { - compl = kmalloc(sizeof(struct ath10k_pci_compl), - GFP_KERNEL); - if (!compl) { - ath10k_warn("No memory for completion state\n"); - ath10k_pci_stop_ce(ar); - return -ENOMEM; - } - - compl->send_or_recv = HIF_CE_COMPLETE_FREE; - list_add_tail(&compl->list, &pipe_info->compl_free); - } } return 0; } -static void ath10k_pci_stop_ce(struct ath10k *ar) +static void ath10k_pci_kill_tasklet(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_compl *compl; - struct sk_buff *skb; int i; - ath10k_ce_disable_interrupts(ar); - - /* Cancel the pending tasklet */ tasklet_kill(&ar_pci->intr_tq); + tasklet_kill(&ar_pci->msi_fw_err); + tasklet_kill(&ar_pci->early_irq_tasklet); for (i = 0; i < CE_COUNT; i++) tasklet_kill(&ar_pci->pipe_info[i].intr); - - /* Mark pending completions as aborted, so that upper layers free up - * their associated resources */ - spin_lock_bh(&ar_pci->compl_lock); - list_for_each_entry(compl, &ar_pci->compl_process, list) { - skb = (struct sk_buff *)compl->transfer_context; - ATH10K_SKB_CB(skb)->is_aborted = true; - } - spin_unlock_bh(&ar_pci->compl_lock); -} - -static void ath10k_pci_cleanup_ce(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_compl *compl, *tmp; - struct hif_ce_pipe_info *pipe_info; - struct sk_buff *netbuf; - int pipe_num; - - /* Free pending completions. */ - spin_lock_bh(&ar_pci->compl_lock); - if (!list_empty(&ar_pci->compl_process)) - ath10k_warn("pending completions still present! 
possible memory leaks.\n"); - - list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) { - list_del(&compl->list); - netbuf = (struct sk_buff *)compl->transfer_context; - dev_kfree_skb_any(netbuf); - kfree(compl); - } - spin_unlock_bh(&ar_pci->compl_lock); - - /* Free unused completions for each pipe. */ - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - - spin_lock_bh(&pipe_info->pipe_lock); - list_for_each_entry_safe(compl, tmp, - &pipe_info->compl_free, list) { - list_del(&compl->list); - kfree(compl); - } - spin_unlock_bh(&pipe_info->pipe_lock); - } -} - -static void ath10k_pci_process_ce(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ar->hif.priv; - struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; - struct ath10k_pci_compl *compl; - struct sk_buff *skb; - unsigned int nbytes; - int ret, send_done = 0; - - /* Upper layers aren't ready to handle tx/rx completions in parallel so - * we must serialize all completion processing. */ - - spin_lock_bh(&ar_pci->compl_lock); - if (ar_pci->compl_processing) { - spin_unlock_bh(&ar_pci->compl_lock); - return; - } - ar_pci->compl_processing = true; - spin_unlock_bh(&ar_pci->compl_lock); - - for (;;) { - spin_lock_bh(&ar_pci->compl_lock); - if (list_empty(&ar_pci->compl_process)) { - spin_unlock_bh(&ar_pci->compl_lock); - break; - } - compl = list_first_entry(&ar_pci->compl_process, - struct ath10k_pci_compl, list); - list_del(&compl->list); - spin_unlock_bh(&ar_pci->compl_lock); - - if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) { - cb->tx_completion(ar, - compl->transfer_context, - compl->transfer_id); - send_done = 1; - } else { - ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1); - if (ret) { - ath10k_warn("Unable to post recv buffer for pipe: %d\n", - compl->pipe_info->pipe_num); - break; - } - - skb = (struct sk_buff *)compl->transfer_context; - nbytes = compl->nbytes; - - ath10k_dbg(ATH10K_DBG_PCI, - "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n", - skb, nbytes); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, - "ath10k rx: ", skb->data, nbytes); - - if (skb->len + skb_tailroom(skb) >= nbytes) { - skb_trim(skb, 0); - skb_put(skb, nbytes); - cb->rx_completion(ar, skb, - compl->pipe_info->pipe_num); - } else { - ath10k_warn("rxed more than expected (nbytes %d, max %d)", - nbytes, - skb->len + skb_tailroom(skb)); - } - } - - compl->send_or_recv = HIF_CE_COMPLETE_FREE; - - /* - * Add completion back to the pipe's free list. 
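For context, a hypothetical caller of the scatter-gather send path that replaces all of this completion-list machinery (ath10k_pci_hif_tx_sg(), added earlier in this patch): the skb, skb_paddr, pipe_id and transfer_id variables below stand in for whatever the HTC/HTT layer actually supplies; only the field names and error behaviour are taken from the new code.

	struct ath10k_hif_sg_item items[1] = {
		{
			.transfer_id      = transfer_id,	/* echoed back via tx_completion */
			.transfer_context = skb,		/* NULL suppresses the tx_completion call */
			.vaddr            = skb->data,		/* used only for debug dumps */
			.paddr            = skb_paddr,		/* DMA address of the payload */
			.len              = skb->len,
		},
	};

	ret = ath10k_pci_hif_tx_sg(ar, pipe_id, items, ARRAY_SIZE(items));
	if (ret == -ENOBUFS)
		; /* CE source ring full -- retry later */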
- */ - spin_lock_bh(&compl->pipe_info->pipe_lock); - list_add_tail(&compl->list, &compl->pipe_info->compl_free); - compl->pipe_info->num_sends_allowed += send_done; - spin_unlock_bh(&compl->pipe_info->pipe_lock); - } - - spin_lock_bh(&ar_pci->compl_lock); - ar_pci->compl_processing = false; - spin_unlock_bh(&ar_pci->compl_lock); } /* TODO - temporary mapping while we have too few CE's */ @@ -970,6 +967,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, { int ret = 0; + ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n"); + /* polling for received messages not supported */ *dl_is_polled = 0; @@ -1029,6 +1028,8 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, { int ul_is_polled, dl_is_polled; + ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n"); + (void)ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_RSVD_CTRL, ul_pipe, @@ -1037,12 +1038,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, &dl_is_polled); } -static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, +static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, int num) { struct ath10k *ar = pipe_info->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_state = pipe_info->ce_hdl; + struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl; struct sk_buff *skb; dma_addr_t ce_data; int i, ret = 0; @@ -1053,7 +1054,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, for (i = 0; i < num; i++) { skb = dev_alloc_skb(pipe_info->buf_sz); if (!skb) { - ath10k_warn("could not allocate skbuff for pipe %d\n", + ath10k_warn("failed to allocate skbuff for pipe %d\n", num); ret = -ENOMEM; goto err; @@ -1066,7 +1067,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ar->dev, ce_data))) { - ath10k_warn("could not dma map skbuff\n"); + ath10k_warn("failed to DMA map sk_buff\n"); dev_kfree_skb_any(skb); ret = -EIO; goto err; @@ -1081,7 +1082,7 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info, ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, ce_data); if (ret) { - ath10k_warn("could not enqueue to pipe %d (%d)\n", + ath10k_warn("failed to enqueue to pipe %d: %d\n", num, ret); goto err; } @@ -1097,11 +1098,11 @@ err: static int ath10k_pci_post_rx(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; const struct ce_attr *attr; int pipe_num, ret = 0; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { pipe_info = &ar_pci->pipe_info[pipe_num]; attr = &host_ce_config_wlan[pipe_num]; @@ -1111,8 +1112,8 @@ static int ath10k_pci_post_rx(struct ath10k *ar) ret = ath10k_pci_post_rx_pipe(pipe_info, attr->dest_nentries - 1); if (ret) { - ath10k_warn("Unable to replenish recv buffers for pipe: %d\n", - pipe_num); + ath10k_warn("failed to post RX buffer for pipe %d: %d\n", + pipe_num, ret); for (; pipe_num >= 0; pipe_num--) { pipe_info = &ar_pci->pipe_info[pipe_num]; @@ -1128,30 +1129,57 @@ static int ath10k_pci_post_rx(struct ath10k *ar) static int ath10k_pci_hif_start(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; + int ret, ret_early; - ret = ath10k_pci_start_ce(ar); + ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n"); + + ath10k_pci_free_early_irq(ar); + ath10k_pci_kill_tasklet(ar); + + ret = ath10k_pci_request_irq(ar); if 
(ret) { - ath10k_warn("could not start CE (%d)\n", ret); - return ret; + ath10k_warn("failed to post RX buffers for all pipes: %d\n", + ret); + goto err_early_irq; + } + + ret = ath10k_pci_setup_ce_irq(ar); + if (ret) { + ath10k_warn("failed to setup CE interrupts: %d\n", ret); + goto err_stop; } /* Post buffers once to start things off. */ ret = ath10k_pci_post_rx(ar); if (ret) { - ath10k_warn("could not post rx pipes (%d)\n", ret); - return ret; + ath10k_warn("failed to post RX buffers for all pipes: %d\n", + ret); + goto err_stop; } ar_pci->started = 1; return 0; + +err_stop: + ath10k_ce_disable_interrupts(ar); + ath10k_pci_free_irq(ar); + ath10k_pci_kill_tasklet(ar); +err_early_irq: + /* Though there should be no interrupts (device was reset) + * power_down() expects the early IRQ to be installed as per the + * driver lifecycle. */ + ret_early = ath10k_pci_request_early_irq(ar); + if (ret_early) + ath10k_warn("failed to re-enable early irq: %d\n", ret_early); + + return ret; } -static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) { struct ath10k *ar; struct ath10k_pci *ar_pci; - struct ce_state *ce_hdl; + struct ath10k_ce_pipe *ce_hdl; u32 buf_sz; struct sk_buff *netbuf; u32 ce_data; @@ -1179,11 +1207,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) } } -static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) +static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) { struct ath10k *ar; struct ath10k_pci *ar_pci; - struct ce_state *ce_hdl; + struct ath10k_ce_pipe *ce_hdl; struct sk_buff *netbuf; u32 ce_data; unsigned int nbytes; @@ -1206,15 +1234,13 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info) while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, &ce_data, &nbytes, &id) == 0) { - if (netbuf != CE_SENDLIST_ITEM_CTXT) - /* - * Indicate the completion to higer layer to free - * the buffer - */ - ATH10K_SKB_CB(netbuf)->is_aborted = true; - ar_pci->msg_callbacks_current.tx_completion(ar, - netbuf, - id); + /* no need to call tx completion for NULL pointers */ + if (!netbuf) + continue; + + ar_pci->msg_callbacks_current.tx_completion(ar, + netbuf, + id); } } @@ -1231,8 +1257,8 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int pipe_num; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - struct hif_ce_pipe_info *pipe_info; + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { + struct ath10k_pci_pipe *pipe_info; pipe_info = &ar_pci->pipe_info[pipe_num]; ath10k_pci_rx_pipe_cleanup(pipe_info); @@ -1242,49 +1268,46 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) static void ath10k_pci_ce_deinit(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; - int pipe_num; - - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - if (pipe_info->ce_hdl) { - ath10k_ce_deinit(pipe_info->ce_hdl); - pipe_info->ce_hdl = NULL; - pipe_info->buf_sz = 0; - } - } -} - -static void ath10k_pci_disable_irqs(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; - for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) - disable_irq(ar_pci->pdev->irq + i); + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_deinit_pipe(ar, i); } static void ath10k_pci_hif_stop(struct ath10k *ar) { struct ath10k_pci *ar_pci = 
ath10k_pci_priv(ar); + int ret; - ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__); + ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n"); - /* Irqs are never explicitly re-enabled. They are implicitly re-enabled - * by ath10k_pci_start_intr(). */ - ath10k_pci_disable_irqs(ar); + if (WARN_ON(!ar_pci->started)) + return; - ath10k_pci_stop_ce(ar); + ret = ath10k_ce_disable_interrupts(ar); + if (ret) + ath10k_warn("failed to disable CE interrupts: %d\n", ret); + + ath10k_pci_free_irq(ar); + ath10k_pci_kill_tasklet(ar); + + ret = ath10k_pci_request_early_irq(ar); + if (ret) + ath10k_warn("failed to re-enable early irq: %d\n", ret); /* At this point, asynchronous threads are stopped, the target should * not DMA nor interrupt. We process the leftovers and then free * everything else up. */ - ath10k_pci_process_ce(ar); - ath10k_pci_cleanup_ce(ar); ath10k_pci_buffer_cleanup(ar); + /* Make the sure the device won't access any structures on the host by + * resetting it. The device was fed with PCI CE ringbuffer + * configuration during init. If ringbuffers are freed and the device + * were to access them this could lead to memory corruption on the + * host. */ + ath10k_pci_warm_reset(ar); + ar_pci->started = 0; } @@ -1293,14 +1316,18 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *resp, u32 *resp_len) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl; - struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl; + struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; + struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; + struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; + struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; dma_addr_t req_paddr = 0; dma_addr_t resp_paddr = 0; struct bmi_xfer xfer = {}; void *treq, *tresp = NULL; int ret = 0; + might_sleep(); + if (resp && !resp_len) return -EINVAL; @@ -1341,14 +1368,12 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, if (ret) goto err_resp; - ret = wait_for_completion_timeout(&xfer.done, - BMI_COMMUNICATION_TIMEOUT_HZ); - if (ret <= 0) { + ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer); + if (ret) { u32 unused_buffer; unsigned int unused_nbytes; unsigned int unused_id; - ret = -ETIMEDOUT; ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, &unused_nbytes, &unused_id); } else { @@ -1378,13 +1403,16 @@ err_dma: return ret; } -static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, - void *transfer_context, - u32 data, - unsigned int nbytes, - unsigned int transfer_id) +static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) { - struct bmi_xfer *xfer = transfer_context; + struct bmi_xfer *xfer; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + + if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data, + &nbytes, &transfer_id)) + return; if (xfer->wait_for_resp) return; @@ -1392,14 +1420,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state, complete(&xfer->done); } -static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, - void *transfer_context, - u32 data, - unsigned int nbytes, - unsigned int transfer_id, - unsigned int flags) +static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { - struct bmi_xfer *xfer = transfer_context; + struct bmi_xfer *xfer; + u32 ce_data; + unsigned int nbytes; + unsigned int transfer_id; + unsigned int flags; + + if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, + 
&nbytes, &transfer_id, &flags)) + return; if (!xfer->wait_for_resp) { ath10k_warn("unexpected: BMI data received; ignoring\n"); @@ -1410,6 +1441,25 @@ static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state, complete(&xfer->done); } +static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, + struct ath10k_ce_pipe *rx_pipe, + struct bmi_xfer *xfer) +{ + unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; + + while (time_before_eq(jiffies, timeout)) { + ath10k_pci_bmi_send_done(tx_pipe); + ath10k_pci_bmi_recv_data(rx_pipe); + + if (completion_done(&xfer->done)) + return 0; + + schedule(); + } + + return -ETIMEDOUT; +} + /* * Map from service/endpoint to Copy Engine. * This table is derived from the CE_PCI TABLE, above. @@ -1519,7 +1569,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) CORE_CTRL_ADDRESS, &core_ctrl); if (ret) { - ath10k_warn("Unable to read core ctrl\n"); + ath10k_warn("failed to read core_ctrl: %d\n", ret); return ret; } @@ -1529,10 +1579,13 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS, core_ctrl); - if (ret) - ath10k_warn("Unable to set interrupt mask\n"); + if (ret) { + ath10k_warn("failed to set target CPU interrupt mask: %d\n", + ret); + return ret; + } - return ret; + return 0; } static int ath10k_pci_init_config(struct ath10k *ar) @@ -1674,73 +1727,78 @@ static int ath10k_pci_init_config(struct ath10k *ar) return 0; } +static int ath10k_pci_alloc_ce(struct ath10k *ar) +{ + int i, ret; + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err("failed to allocate copy engine pipe %d: %d\n", + i, ret); + return ret; + } + } + + return 0; +} + +static void ath10k_pci_free_ce(struct ath10k *ar) +{ + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_free_pipe(ar, i); +} static int ath10k_pci_ce_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct hif_ce_pipe_info *pipe_info; + struct ath10k_pci_pipe *pipe_info; const struct ce_attr *attr; - int pipe_num; + int pipe_num, ret; - for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) { + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { pipe_info = &ar_pci->pipe_info[pipe_num]; + pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num]; pipe_info->pipe_num = pipe_num; pipe_info->hif_ce_state = ar; attr = &host_ce_config_wlan[pipe_num]; - pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr); - if (pipe_info->ce_hdl == NULL) { - ath10k_err("Unable to initialize CE for pipe: %d\n", - pipe_num); - - /* It is safe to call it here. It checks if ce_hdl is - * valid for each pipe */ - ath10k_pci_ce_deinit(ar); - return -1; + ret = ath10k_ce_init_pipe(ar, pipe_num, attr); + if (ret) { + ath10k_err("failed to initialize copy engine pipe %d: %d\n", + pipe_num, ret); + return ret; } - if (pipe_num == ar_pci->ce_count - 1) { + if (pipe_num == CE_COUNT - 1) { /* * Reserve the ultimate CE for * diagnostic Window support */ - ar_pci->ce_diag = - ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl; + ar_pci->ce_diag = pipe_info->ce_hdl; continue; } pipe_info->buf_sz = (size_t) (attr->src_sz_max); } - /* - * Initially, establish CE completion handlers for use with BMI. - * These are overwritten with generic handlers after we exit BMI phase. 
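Side note on the split introduced above (the probe/remove call sites fall outside the hunks shown here, so the exact pairing is an assumption): copy engine setup is now a one-time allocation plus a per-power-cycle init, roughly:

	ath10k_pci_alloc_ce(ar);	/* once: allocate all CE pipes */
	ath10k_pci_ce_init(ar);		/* every power up: (re)program the pipes */
	ath10k_pci_ce_deinit(ar);	/* every power down */
	ath10k_pci_free_ce(ar);		/* once: release what alloc_ce() set up */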
- */ - pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; - ath10k_ce_send_cb_register(pipe_info->ce_hdl, - ath10k_pci_bmi_send_done, 0); - - pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; - ath10k_ce_recv_cb_register(pipe_info->ce_hdl, - ath10k_pci_bmi_recv_data); - return 0; } static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - u32 fw_indicator_address, fw_indicator; + u32 fw_indicator; ath10k_pci_wake(ar); - fw_indicator_address = ar_pci->fw_indicator_address; - fw_indicator = ath10k_pci_read32(ar, fw_indicator_address); + fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); if (fw_indicator & FW_IND_EVENT_PENDING) { /* ACK: clear Target-side pending event */ - ath10k_pci_write32(ar, fw_indicator_address, + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, fw_indicator & ~FW_IND_EVENT_PENDING); if (ar_pci->started) { @@ -1757,17 +1815,120 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) ath10k_pci_sleep(ar); } -static int ath10k_pci_hif_power_up(struct ath10k *ar) +/* this function effectively clears target memory controller assert line */ +static void ath10k_pci_warm_reset_si0(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; + u32 val; + + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_SI0_RST_MASK); + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + + msleep(10); + + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, + val & ~SOC_RESET_CONTROL_SI0_RST_MASK); + val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); + + msleep(10); +} + +static int ath10k_pci_warm_reset(struct ath10k *ar) +{ + int ret = 0; + u32 val; + + ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n"); - ret = ath10k_pci_start_intr(ar); + ret = ath10k_do_pci_wake(ar); if (ret) { - ath10k_err("could not start interrupt handling (%d)\n", ret); - goto err; + ath10k_err("failed to wake up target: %d\n", ret); + return ret; } + /* debug */ + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CAUSE_ADDRESS); + ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CPU_INTR_ADDRESS); + ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", + val); + + /* disable pending irqs */ + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS, 0); + + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CLR_ADDRESS, ~0); + + msleep(100); + + /* clear fw indicator */ + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); + + /* clear target LF timer interrupts */ + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_LF_TIMER_CONTROL0_ADDRESS); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + + SOC_LF_TIMER_CONTROL0_ADDRESS, + val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); + + /* reset CE */ + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CE_RST_MASK); + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + msleep(10); + + /* unreset CE */ + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val & ~SOC_RESET_CONTROL_CE_RST_MASK); + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + msleep(10); + + 
ath10k_pci_warm_reset_si0(ar); + + /* debug */ + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_CAUSE_ADDRESS); + ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + + val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + + CPU_INTR_ADDRESS); + ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", + val); + + /* CPU warm reset */ + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, + val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); + + val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + + SOC_RESET_CONTROL_ADDRESS); + ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val); + + msleep(100); + + ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n"); + + ath10k_do_pci_sleep(ar); + return ret; +} + +static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + const char *irq_mode; + int ret; + /* * Bring the target up cleanly. * @@ -1778,50 +1939,159 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) * is in an unexpected state. We try to catch that here in order to * reset the Target and retry the probe. */ - ath10k_pci_device_reset(ar); + if (cold_reset) + ret = ath10k_pci_cold_reset(ar); + else + ret = ath10k_pci_warm_reset(ar); - ret = ath10k_pci_reset_target(ar); - if (ret) - goto err_irq; + if (ret) { + ath10k_err("failed to reset target: %d\n", ret); + goto err; + } if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) /* Force AWAKE forever */ ath10k_do_pci_wake(ar); ret = ath10k_pci_ce_init(ar); - if (ret) + if (ret) { + ath10k_err("failed to initialize CE: %d\n", ret); goto err_ps; + } - ret = ath10k_pci_init_config(ar); - if (ret) + ret = ath10k_ce_disable_interrupts(ar); + if (ret) { + ath10k_err("failed to disable CE interrupts: %d\n", ret); goto err_ce; + } - ret = ath10k_pci_wake_target_cpu(ar); + ret = ath10k_pci_init_irq(ar); if (ret) { - ath10k_err("could not wake up target CPU (%d)\n", ret); + ath10k_err("failed to init irqs: %d\n", ret); goto err_ce; } + ret = ath10k_pci_request_early_irq(ar); + if (ret) { + ath10k_err("failed to request early irq: %d\n", ret); + goto err_deinit_irq; + } + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) { + ath10k_err("failed to wait for target to init: %d\n", ret); + goto err_free_early_irq; + } + + ret = ath10k_pci_init_config(ar); + if (ret) { + ath10k_err("failed to setup init config: %d\n", ret); + goto err_free_early_irq; + } + + ret = ath10k_pci_wake_target_cpu(ar); + if (ret) { + ath10k_err("could not wake up target CPU: %d\n", ret); + goto err_free_early_irq; + } + + if (ar_pci->num_msi_intrs > 1) + irq_mode = "MSI-X"; + else if (ar_pci->num_msi_intrs == 1) + irq_mode = "MSI"; + else + irq_mode = "legacy"; + + if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) + ath10k_info("pci irq %s irq_mode %d reset_mode %d\n", + irq_mode, ath10k_pci_irq_mode, + ath10k_pci_reset_mode); + return 0; +err_free_early_irq: + ath10k_pci_free_early_irq(ar); +err_deinit_irq: + ath10k_pci_deinit_irq(ar); err_ce: ath10k_pci_ce_deinit(ar); + ath10k_pci_warm_reset(ar); err_ps: if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) ath10k_do_pci_sleep(ar); -err_irq: - ath10k_pci_stop_intr(ar); err: return ret; } +static int ath10k_pci_hif_power_up_warm(struct ath10k *ar) +{ + int i, ret; + + /* + * Sometime warm reset succeeds after retries. 
+ * + * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work + * at first try. + */ + for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { + ret = __ath10k_pci_hif_power_up(ar, false); + if (ret == 0) + break; + + ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n", + i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret); + } + + return ret; +} + +static int ath10k_pci_hif_power_up(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n"); + + /* + * Hardware CUS232 version 2 has some issues with cold reset and the + * preferred (and safer) way to perform a device reset is through a + * warm reset. + * + * Warm reset doesn't always work though so fall back to cold reset may + * be necessary. + */ + ret = ath10k_pci_hif_power_up_warm(ar); + if (ret) { + ath10k_warn("failed to power up target using warm reset: %d\n", + ret); + + if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) + return ret; + + ath10k_warn("trying cold reset\n"); + + ret = __ath10k_pci_hif_power_up(ar, true); + if (ret) { + ath10k_err("failed to power up target using cold reset too (%d)\n", + ret); + return ret; + } + } + + return 0; +} + static void ath10k_pci_hif_power_down(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_pci_stop_intr(ar); + ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n"); + ath10k_pci_free_early_irq(ar); + ath10k_pci_kill_tasklet(ar); + ath10k_pci_deinit_irq(ar); ath10k_pci_ce_deinit(ar); + ath10k_pci_warm_reset(ar); + if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) ath10k_do_pci_sleep(ar); } @@ -1876,7 +2146,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) #endif static const struct ath10k_hif_ops ath10k_pci_hif_ops = { - .send_head = ath10k_pci_hif_send_head, + .tx_sg = ath10k_pci_hif_tx_sg, .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, .start = ath10k_pci_hif_start, .stop = ath10k_pci_hif_stop, @@ -1895,7 +2165,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { static void ath10k_pci_ce_tasklet(unsigned long ptr) { - struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr; + struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; struct ath10k_pci *ar_pci = pipe->ar_pci; ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); @@ -1955,25 +2225,10 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); if (ar_pci->num_msi_intrs == 0) { - /* - * IMPORTANT: INTR_CLR regiser has to be set after - * INTR_ENABLE is set to 0, otherwise interrupt can not be - * really cleared. - */ - iowrite32(0, ar_pci->mem + - (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_ENABLE_ADDRESS)); - iowrite32(PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL, - ar_pci->mem + (SOC_CORE_BASE_ADDRESS | - PCIE_INTR_CLR_ADDRESS)); - /* - * IMPORTANT: this extra read transaction is required to - * flush the posted write buffer. 
-	 */
-	(void) ioread32(ar_pci->mem +
-			(SOC_CORE_BASE_ADDRESS |
-			 PCIE_INTR_ENABLE_ADDRESS));
+		if (!ath10k_pci_irq_pending(ar))
+			return IRQ_NONE;
+
+		ath10k_pci_disable_and_clear_legacy_irq(ar);
 	}
 
 	tasklet_schedule(&ar_pci->intr_tq);
 
@@ -1981,6 +2236,30 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
+static void ath10k_pci_early_irq_tasklet(unsigned long data)
+{
+	struct ath10k *ar = (struct ath10k *)data;
+	u32 fw_ind;
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
+			    ret);
+		return;
+	}
+
+	fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+	if (fw_ind & FW_IND_EVENT_PENDING) {
+		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+				   fw_ind & ~FW_IND_EVENT_PENDING);
+		ath10k_pci_hif_dump_area(ar);
+	}
+
+	ath10k_pci_sleep(ar);
+	ath10k_pci_enable_legacy_irq(ar);
+}
+
 static void ath10k_pci_tasklet(unsigned long data)
 {
 	struct ath10k *ar = (struct ath10k *)data;
@@ -1989,40 +2268,22 @@ static void ath10k_pci_tasklet(unsigned long data)
 	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
 	ath10k_ce_per_engine_service_any(ar);
 
-	if (ar_pci->num_msi_intrs == 0) {
-		/* Enable Legacy PCI line interrupts */
-		iowrite32(PCIE_INTR_FIRMWARE_MASK |
-			  PCIE_INTR_CE_MASK_ALL,
-			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-					 PCIE_INTR_ENABLE_ADDRESS));
-		/*
-		 * IMPORTANT: this extra read transaction is required to
-		 * flush the posted write buffer
-		 */
-		(void) ioread32(ar_pci->mem +
-				(SOC_CORE_BASE_ADDRESS |
-				 PCIE_INTR_ENABLE_ADDRESS));
-	}
+	/* Re-enable legacy irq that was disabled in the irq handler */
+	if (ar_pci->num_msi_intrs == 0)
+		ath10k_pci_enable_legacy_irq(ar);
 }
 
-static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+static int ath10k_pci_request_irq_msix(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int ret;
-	int i;
-
-	ret = pci_enable_msi_block(ar_pci->pdev, num);
-	if (ret)
-		return ret;
+	int ret, i;
 
 	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
 	if (ret) {
-		ath10k_warn("request_irq(%d) failed %d\n",
+		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
-
-		pci_disable_msi(ar_pci->pdev);
 		return ret;
 	}
 
@@ -2031,44 +2292,38 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
 		if (ret) {
-			ath10k_warn("request_irq(%d) failed %d\n",
+			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);
 
 			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
 				free_irq(ar_pci->pdev->irq + i, ar);
 
 			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
-			pci_disable_msi(ar_pci->pdev);
 			return ret;
 		}
 	}
 
-	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
 	return 0;
 }
 
-static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
-	ret = pci_enable_msi(ar_pci->pdev);
-	if (ret < 0)
-		return ret;
-
 	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
-	if (ret < 0) {
-		pci_disable_msi(ar_pci->pdev);
+	if (ret) {
+		ath10k_warn("failed to request MSI irq %d: %d\n",
+			    ar_pci->pdev->irq, ret);
 		return ret;
 	}
 
-	ath10k_info("MSI interrupt handling\n");
 	return 0;
 }
 
-static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
@@ -2076,168 +2331,247 @@ static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
 	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
-	if (ret < 0)
+	if (ret) {
+		ath10k_warn("failed to request legacy irq %d: %d\n",
+			    ar_pci->pdev->irq, ret);
 		return ret;
+	}
 
-	/*
-	 * Make sure to wake the Target before enabling Legacy
-	 * Interrupt.
-	 */
-	iowrite32(PCIE_SOC_WAKE_V_MASK,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
+	return 0;
+}
 
-	ath10k_pci_wait(ar);
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-	/*
-	 * A potential race occurs here: The CORE_BASE write
-	 * depends on target correctly decoding AXI address but
-	 * host won't know when target writes BAR to CORE_CTRL.
-	 * This write might get lost if target has NOT written BAR.
-	 * For now, fix the race by repeating the write in below
-	 * synchronization checking.
-	 */
-	iowrite32(PCIE_INTR_FIRMWARE_MASK |
-		  PCIE_INTR_CE_MASK_ALL,
-		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-				 PCIE_INTR_ENABLE_ADDRESS));
-	iowrite32(PCIE_SOC_WAKE_RESET,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
-
-	ath10k_info("legacy interrupt handling\n");
-	return 0;
+	switch (ar_pci->num_msi_intrs) {
+	case 0:
+		return ath10k_pci_request_irq_legacy(ar);
+	case 1:
+		return ath10k_pci_request_irq_msi(ar);
+	case MSI_NUM_REQUEST:
+		return ath10k_pci_request_irq_msix(ar);
+	}
+
+	ath10k_warn("unknown irq configuration upon request\n");
+	return -EINVAL;
 }
 
-static int ath10k_pci_start_intr(struct ath10k *ar)
+static void ath10k_pci_free_irq(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int num = MSI_NUM_REQUEST;
-	int ret;
 	int i;
 
-	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+	/* There's at least one interrupt irregardless whether its legacy INTR
+	 * or MSI or MSI-X */
+	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+		free_irq(ar_pci->pdev->irq + i, ar);
+}
+
+static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int i;
+
+	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
 	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
-		     (unsigned long) ar);
+		     (unsigned long)ar);
+	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
+		     (unsigned long)ar);
 
 	for (i = 0; i < CE_COUNT; i++) {
 		ar_pci->pipe_info[i].ar_pci = ar_pci;
-		tasklet_init(&ar_pci->pipe_info[i].intr,
-			     ath10k_pci_ce_tasklet,
+		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
 	}
+}
 
-	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
-		num = 1;
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
+				       ar_pci->features);
+	int ret;
 
-	if (num > 1) {
-		ret = ath10k_pci_start_intr_msix(ar, num);
-		if (ret == 0)
-			goto exit;
+	ath10k_pci_init_irq_tasklets(ar);
+
+	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
+	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
+
+	/* Try MSI-X */
+	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
+		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+					   ar_pci->num_msi_intrs);
+		if (ret > 0)
+			return 0;
 
-		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
-		num = 1;
+		/* fall-through */
 	}
 
-	if (num == 1) {
-		ret = ath10k_pci_start_intr_msi(ar);
+	/* Try MSI */
+	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+		ar_pci->num_msi_intrs = 1;
+		ret = pci_enable_msi(ar_pci->pdev);
 		if (ret == 0)
-			goto exit;
+			return 0;
 
-		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
-			    ret);
-		num = 0;
+		/* fall-through */
 	}
 
-	ret = ath10k_pci_start_intr_legacy(ar);
+	/* Try legacy irq
+	 *
+	 * A potential race occurs here: The CORE_BASE write
+	 * depends on target correctly decoding AXI address but
+	 * host won't know when target writes BAR to CORE_CTRL.
+	 * This write might get lost if target has NOT written BAR.
+	 * For now, fix the race by repeating the write in below
+	 * synchronization checking. */
+	ar_pci->num_msi_intrs = 0;
 
-exit:
-	ar_pci->num_msi_intrs = num;
-	ar_pci->ce_count = CE_COUNT;
-	return ret;
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn("failed to wake target: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+	ath10k_pci_sleep(ar);
+
+	return 0;
 }
 
-static void ath10k_pci_stop_intr(struct ath10k *ar)
+static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int i;
+	int ret;
 
-	/* There's at least one interrupt irregardless whether its legacy INTR
-	 * or MSI or MSI-X */
-	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
-		free_irq(ar_pci->pdev->irq + i, ar);
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_warn("failed to wake target: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+			   0);
+	ath10k_pci_sleep(ar);
 
-	if (ar_pci->num_msi_intrs > 0)
+	return 0;
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+	switch (ar_pci->num_msi_intrs) {
+	case 0:
+		return ath10k_pci_deinit_irq_legacy(ar);
+	case 1:
+		/* fall-through */
+	case MSI_NUM_REQUEST:
+		pci_disable_msi(ar_pci->pdev);
+		return 0;
+	default:
 		pci_disable_msi(ar_pci->pdev);
+	}
+
+	ath10k_warn("unknown irq configuration upon deinit\n");
+	return -EINVAL;
 }
 
-static int ath10k_pci_reset_target(struct ath10k *ar)
+static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	int wait_limit = 300; /* 3 sec */
+	unsigned long timeout;
+	int ret;
+	u32 val;
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
+
+	ret = ath10k_pci_wake(ar);
+	if (ret) {
+		ath10k_err("failed to wake up target for init: %d\n", ret);
+		return ret;
+	}
+
+	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
 
-	/* Wait for Target to finish initialization before we proceed. */
-	iowrite32(PCIE_SOC_WAKE_V_MASK,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
+	do {
+		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
 
-	ath10k_pci_wait(ar);
+		ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+
+		/* target should never return this */
+		if (val == 0xffffffff)
+			continue;
+
+		/* the device has crashed so don't bother trying anymore */
+		if (val & FW_IND_EVENT_PENDING)
+			break;
+
+		if (val & FW_IND_INITIALIZED)
+			break;
 
-	while (wait_limit-- &&
-	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
-		 FW_IND_INITIALIZED)) {
 		if (ar_pci->num_msi_intrs == 0)
 			/* Fix potential race by repeating CORE_BASE writes */
-			iowrite32(PCIE_INTR_FIRMWARE_MASK |
-				  PCIE_INTR_CE_MASK_ALL,
-				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
-						 PCIE_INTR_ENABLE_ADDRESS));
+			ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
					       PCIE_INTR_FIRMWARE_MASK |
					       PCIE_INTR_CE_MASK_ALL);
+
 		mdelay(10);
+	} while (time_before(jiffies, timeout));
+
+	if (val == 0xffffffff) {
+		ath10k_err("failed to read device register, device is gone\n");
+		ret = -EIO;
+		goto out;
 	}
 
-	if (wait_limit < 0) {
-		ath10k_err("Target stalled\n");
-		iowrite32(PCIE_SOC_WAKE_RESET,
-			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-			  PCIE_SOC_WAKE_ADDRESS);
-		return -EIO;
+	if (val & FW_IND_EVENT_PENDING) {
+		ath10k_warn("device has crashed during init\n");
+		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+				   val & ~FW_IND_EVENT_PENDING);
+		ath10k_pci_hif_dump_area(ar);
+		ret = -ECOMM;
+		goto out;
 	}
 
-	iowrite32(PCIE_SOC_WAKE_RESET,
-		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
-		  PCIE_SOC_WAKE_ADDRESS);
+	if (!(val & FW_IND_INITIALIZED)) {
+		ath10k_err("failed to receive initialized event from target: %08x\n",
			   val);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
 
-	return 0;
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+
+out:
+	ath10k_pci_sleep(ar);
+	return ret;
 }
 
-static void ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	void __iomem *mem = ar_pci->mem;
-	int i;
+	int i, ret;
 	u32 val;
 
-	if (!SOC_GLOBAL_RESET_ADDRESS)
-		return;
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
 
-	if (!mem)
-		return;
-
-	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
-			       PCIE_SOC_WAKE_V_MASK);
-	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_target_is_awake(ar))
-			break;
-		msleep(1);
+	ret = ath10k_do_pci_wake(ar);
+	if (ret) {
+		ath10k_err("failed to wake up target: %d\n",
			   ret);
+		return ret;
 	}
 
 	/* Put Target, including PCIe, into RESET. */
-	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
 	val |= 1;
-	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
 			break;
 		msleep(1);
@@ -2245,16 +2579,20 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
 	/* Pull Target, including PCIe, out of RESET. */
 	val &= ~1;
-	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
 			break;
 		msleep(1);
 	}
 
-	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+	ath10k_do_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
+	return 0;
 }
 
 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2605,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
 		switch (i) {
 		case ATH10K_PCI_FEATURE_MSI_X:
-			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
-			break;
-		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
-			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
 			break;
 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
-			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
 			break;
 		}
 	}
@@ -2286,9 +2621,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	int ret = 0;
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	u32 lcr_val;
+	u32 lcr_val, chip_id;
 
-	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+	ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
 
 	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
 	if (ar_pci == NULL)
@@ -2298,62 +2633,42 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	ar_pci->dev = &pdev->dev;
 
 	switch (pci_dev->device) {
-	case QCA988X_1_0_DEVICE_ID:
-		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
-		break;
 	case QCA988X_2_0_DEVICE_ID:
 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
 		break;
 	default:
 		ret = -ENODEV;
-		ath10k_err("Unkown device ID: %d\n", pci_dev->device);
+		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
 		goto err_ar_pci;
 	}
 
-	if (ath10k_target_ps)
+	if (ath10k_pci_target_ps)
 		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
 
 	ath10k_pci_dump_features(ar_pci);
 
 	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
 	if (!ar) {
-		ath10k_err("ath10k_core_create failed!\n");
+		ath10k_err("failed to create driver core\n");
 		ret = -EINVAL;
 		goto err_ar_pci;
 	}
 
-	/* Enable QCA988X_1.0 HW workarounds */
-	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
-		spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
 	ar_pci->ar = ar;
-	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
 	atomic_set(&ar_pci->keep_awake_count, 0);
 
 	pci_set_drvdata(pdev, ar);
 
-	/*
-	 * Without any knowledge of the Host, the Target may have been reset or
-	 * power cycled and its Config Space may no longer reflect the PCI
-	 * address space that was assigned earlier by the PCI infrastructure.
-	 * Refresh it now.
-	 */
-	ret = pci_assign_resource(pdev, BAR_NUM);
-	if (ret) {
-		ath10k_err("cannot assign PCI space: %d\n", ret);
-		goto err_ar;
-	}
-
 	ret = pci_enable_device(pdev);
 	if (ret) {
-		ath10k_err("cannot enable PCI device: %d\n", ret);
+		ath10k_err("failed to enable PCI device: %d\n", ret);
 		goto err_ar;
 	}
 
 	/* Request MMIO resources */
 	ret = pci_request_region(pdev, BAR_NUM, "ath");
 	if (ret) {
-		ath10k_err("PCI MMIO reservation error: %d\n", ret);
+		ath10k_err("failed to request MMIO region: %d\n", ret);
 		goto err_device;
 	}
 
@@ -2363,13 +2678,13 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	 */
 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		ath10k_err("32-bit DMA not available: %d\n", ret);
+		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
 		goto err_region;
 	}
 
 	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (ret) {
-		ath10k_err("cannot enable 32-bit consistent DMA\n");
+		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
 		goto err_region;
 	}
 
@@ -2386,7 +2701,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	/* Arrange for access to Target SoC registers. */
 	mem = pci_iomap(pdev, BAR_NUM, 0);
 	if (!mem) {
-		ath10k_err("PCI iomap error\n");
+		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
 		ret = -EIO;
 		goto err_master;
 	}
@@ -2395,16 +2710,34 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	spin_lock_init(&ar_pci->ce_lock);
 
-	ar_pci->cacheline_sz = dma_get_cache_alignment();
+	ret = ath10k_do_pci_wake(ar);
+	if (ret) {
+		ath10k_err("Failed to get chip id: %d\n", ret);
+		goto err_iomap;
+	}
 
-	ret = ath10k_core_register(ar);
+	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+
+	ath10k_do_pci_sleep(ar);
+
+	ret = ath10k_pci_alloc_ce(ar);
 	if (ret) {
-		ath10k_err("could not register driver core (%d)\n", ret);
+		ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
 		goto err_iomap;
 	}
 
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+
+	ret = ath10k_core_register(ar, chip_id);
+	if (ret) {
+		ath10k_err("failed to register driver core: %d\n", ret);
+		goto err_free_ce;
+	}
+
 	return 0;
 
+err_free_ce:
+	ath10k_pci_free_ce(ar);
 err_iomap:
 	pci_iounmap(pdev, mem);
 err_master:
@@ -2414,7 +2747,6 @@ err_region:
 err_device:
 	pci_disable_device(pdev);
 err_ar:
-	pci_set_drvdata(pdev, NULL);
 	ath10k_core_destroy(ar);
 err_ar_pci:
 	/* call HIF PCI free here */
@@ -2428,7 +2760,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 	struct ath10k *ar = pci_get_drvdata(pdev);
 	struct ath10k_pci *ar_pci;
 
-	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+	ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
 
 	if (!ar)
 		return;
@@ -2438,11 +2770,9 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 	if (!ar_pci)
 		return;
 
-	tasklet_kill(&ar_pci->msi_fw_err);
-
 	ath10k_core_unregister(ar);
+	ath10k_pci_free_ce(ar);
 
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ar_pci->mem);
 	pci_release_region(pdev, BAR_NUM);
 	pci_clear_master(pdev);
@@ -2467,7 +2797,7 @@ static int __init ath10k_pci_init(void)
 	ret = pci_register_driver(&ath10k_pci_driver);
 	if (ret)
-		ath10k_err("pci_register_driver failed [%d]\n", ret);
+		ath10k_err("failed to register PCI driver: %d\n", ret);
 
 	return ret;
 }
@@ -2483,9 +2813,5 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
