author		David Woodhouse <David.Woodhouse@intel.com>	2010-10-30 12:35:11 +0100
committer	David Woodhouse <David.Woodhouse@intel.com>	2010-10-30 12:35:11 +0100
commit		67577927e8d7a1f4b09b4992df640eadc6aacb36 (patch)
tree		2e9efe6b5745965faf0dcc084d4613d9356263f9 /drivers/net/bna/bnad.c
parent		6fe4c590313133ebd5dadb769031489ff178ece1 (diff)
parent		51f00a471ce8f359627dd99aeac322947a0e491b (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Conflicts:
drivers/mtd/mtd_blkdevs.c
Merge Grant's device-tree bits so that we can apply the subsequent fixes.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/net/bna/bnad.c')
-rw-r--r--	drivers/net/bna/bnad.c	3264
1 files changed, 3264 insertions, 0 deletions
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
new file mode 100644
index 00000000000..7e839b9cec2
--- /dev/null
+++ b/drivers/net/bna/bnad.c
@@ -0,0 +1,3264 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#include "bnad.h"
+#include "bna.h"
+#include "cna.h"
+
+static DEFINE_MUTEX(bnad_fwimg_mutex);
+
+/*
+ * Module params
+ */
+static uint bnad_msix_disable;
+module_param(bnad_msix_disable, uint, 0444);
+MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
+
+static uint bnad_ioc_auto_recover = 1;
+module_param(bnad_ioc_auto_recover, uint, 0444);
+MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+
+/*
+ * Global variables
+ */
+u32 bnad_rxqs_per_cq = 2;
+
+static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+/*
+ * Local MACROS
+ */
+#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
+
+#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
+
+#define BNAD_GET_MBOX_IRQ(_bnad)				\
+	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
+	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
+	 ((_bnad)->pcidev->irq))
+
+#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
+do {								\
+	(_res_info)->res_type = BNA_RES_T_MEM;			\
+	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
+	(_res_info)->res_u.mem_info.num = (_num);		\
+	(_res_info)->res_u.mem_info.len =			\
+		sizeof(struct bnad_unmap_q) +			\
+		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
+} while (0)
+
+/*
+ * Reinitialize completions in CQ, once Rx is taken down
+ */
+static void
+bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	unsigned int wi_range, wis = 0, ccb_prod = 0;
+	int i;
+
+	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
+			    wi_range);
+
+	for (i = 0; i < ccb->q_depth; i++) {
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
+			wis = 0;
+			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
+						next_cmpl, wi_range);
+		}
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+}
+
+/*
+ * Frees all pending Tx Bufs
+ * At this point no activity is expected on the Q,
+ * so DMA unmap & freeing is fine.
+ */
+static void
+bnad_free_all_txbufs(struct bnad *bnad,
+		 struct bna_tcb *tcb)
+{
+	u16 unmap_cons;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb = NULL;
+	int i;
+
+	unmap_array = unmap_q->unmap_array;
+
+	unmap_cons = 0;
+	while (unmap_cons < unmap_q->q_depth) {
+		skb = unmap_array[unmap_cons].skb;
+		if (!skb) {
+			unmap_cons++;
+			continue;
+		}
+		unmap_array[unmap_cons].skb = NULL;
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		unmap_cons++;
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			unmap_cons++;
+		}
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/* Data Path Handlers */
+
+/*
+ * bnad_free_txbufs : Frees the Tx bufs on Tx completion
+ * Can be called in a) Interrupt context
+ * b) Sending context
+ * c) Tasklet context
+ */
+static u32
+bnad_free_txbufs(struct bnad *bnad,
+		 struct bna_tcb *tcb)
+{
+	u32 sent_packets = 0, sent_bytes = 0;
+	u16 wis, unmap_cons, updated_hw_cons;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+	struct bnad_skb_unmap *unmap_array;
+	struct sk_buff *skb;
+	int i;
+
+	/*
+	 * Just return if TX is stopped. This check is useful
+	 * when bnad_free_txbufs() runs out of a tasklet scheduled
+	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
+	 * but this routine runs actually after the cleanup has been
+	 * executed.
+	 */
+	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+		return 0;
+
+	updated_hw_cons = *(tcb->hw_consumer_index);
+
+	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
+				 updated_hw_cons, tcb->q_depth);
+
+	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
+
+	unmap_array = unmap_q->unmap_array;
+	unmap_cons = unmap_q->consumer_index;
+
+	prefetch(&unmap_array[unmap_cons + 1]);
+	while (wis) {
+		skb = unmap_array[unmap_cons].skb;
+
+		unmap_array[unmap_cons].skb = NULL;
+
+		sent_packets++;
+		sent_bytes += skb->len;
+		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
+
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr), skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+
+		prefetch(&unmap_array[unmap_cons + 1]);
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			prefetch(&unmap_array[unmap_cons + 1]);
+
+			pci_unmap_page(bnad->pcidev,
+				       pci_unmap_addr(&unmap_array[unmap_cons],
+						      dma_addr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					   0);
+			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
+		}
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Update consumer pointers. */
+	tcb->consumer_index = updated_hw_cons;
+	unmap_q->consumer_index = unmap_cons;
+
+	tcb->txq->tx_packets += sent_packets;
+	tcb->txq->tx_bytes += sent_bytes;
+
+	return sent_packets;
+}
+
+/* Tx Free Tasklet function */
+/* Frees for all the tcb's in all the Tx's */
+/*
+ * Scheduled from sending context, so that
+ * the fat Tx lock is not held for too long
+ * in the sending context.
+ */
+static void
+bnad_tx_free_tasklet(unsigned long bnad_ptr)
+{
+	struct bnad *bnad = (struct bnad *)bnad_ptr;
+	struct bna_tcb *tcb;
+	u32 acked;
+	int i, j;
+
+	for (i = 0; i < bnad->num_tx; i++) {
+		for (j = 0; j < bnad->num_txq_per_tx; j++) {
+			tcb = bnad->tx_info[i].tcb[j];
+			if (!tcb)
+				continue;
+			if (((u16) (*tcb->hw_consumer_index) !=
+				tcb->consumer_index) &&
+				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
+						  &tcb->flags))) {
+				acked = bnad_free_txbufs(bnad, tcb);
+				bna_ib_ack(tcb->i_dbell, acked);
+				smp_mb__before_clear_bit();
+				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+			}
+		}
+	}
+}
+
+static u32
+bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	struct net_device *netdev = bnad->netdev;
+	u32 sent;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		return 0;
+
+	sent = bnad_free_txbufs(bnad, tcb);
+	if (sent) {
+		if (netif_queue_stopped(netdev) &&
+		    netif_carrier_ok(netdev) &&
+		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
+				    BNAD_NETIF_WAKE_THRESHOLD) {
+			netif_wake_queue(netdev);
+			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+		}
+		bna_ib_ack(tcb->i_dbell, sent);
+	} else
+		bna_ib_ack(tcb->i_dbell, 0);
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+	return sent;
+}
+
+/* MSIX Tx Completion Handler */
+static irqreturn_t
+bnad_msix_tx(int irq, void *data)
+{
+	struct bna_tcb *tcb = (struct bna_tcb *)data;
+	struct bnad *bnad = tcb->bnad;
+
+	bnad_tx(bnad, tcb);
+
+	return IRQ_HANDLED;
+}
+
+static void
+bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+	rcb->producer_index = 0;
+	rcb->consumer_index = 0;
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+}
+
+static void
+bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+
+	unmap_q = rcb->unmap_q;
+	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!(skb));
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
+					unmap_array[unmap_q->consumer_index],
+					dma_addr), rcb->rxq->buffer_size +
+					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(skb);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+	}
+
+	bnad_reset_rcb(bnad, rcb);
+}
+
+static void
+bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	u16 to_alloc, alloced, unmap_prod, wi_range;
+	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+	struct bnad_skb_unmap *unmap_array;
+	struct bna_rxq_entry *rxent;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+
+	alloced = 0;
+	to_alloc =
+		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
+
+	unmap_array = unmap_q->unmap_array;
+	unmap_prod = unmap_q->producer_index;
+
+	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
+
+	while (to_alloc--) {
+		if (!wi_range) {
+			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
+					     wi_range);
+		}
+		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
+				     GFP_ATOMIC);
+		if (unlikely(!skb)) {
+			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
+			goto finishing;
+		}
+		skb->dev = bnad->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+		unmap_array[unmap_prod].skb = skb;
+		dma_addr = pci_map_single(bnad->pcidev, skb->data,
+			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
+		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+				   dma_addr);
+		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
+		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
+
+		rxent++;
+		wi_range--;
+		alloced++;
+	}
+
+finishing:
+	if (likely(alloced)) {
+		unmap_q->producer_index = unmap_prod;
+		rcb->producer_index = unmap_prod;
+		smp_mb();
+		bna_rxq_prod_indx_doorbell(rcb);
+	}
+}
+
+/*
+ * Locking is required in the enable path
+ * because it is called from a napi poll
+ * context, where the bna_lock is not held
+ * unlike the IRQ context.
+ */
+static void
+bnad_enable_txrx_irqs(struct bnad *bnad)
+{
+	struct bna_tcb *tcb;
+	struct bna_ccb *ccb;
+	int i, j;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	for (i = 0; i < bnad->num_tx; i++) {
+		for (j = 0; j < bnad->num_txq_per_tx; j++) {
+			tcb = bnad->tx_info[i].tcb[j];
+			bna_ib_coalescing_timer_set(tcb->i_dbell,
+				tcb->txq->ib->ib_config.coalescing_timeo);
+			bna_ib_ack(tcb->i_dbell, 0);
+		}
+	}
+
+	for (i = 0; i < bnad->num_rx; i++) {
+		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
+			bnad_enable_rx_irq_unsafe(ccb);
+		}
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static inline void
+bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_n_post_rxbufs(bnad, rcb);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+	}
+}
+
+static u32
+bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+{
+	struct bna_cq_entry *cmpl, *next_cmpl;
+	struct bna_rcb *rcb = NULL;
+	unsigned int wi_range, packets = 0, wis = 0;
+	struct bnad_unmap_q *unmap_q;
+	struct sk_buff *skb;
+	u32 flags;
+	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
+	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
+
+	prefetch(bnad->netdev);
+	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
+			    wi_range);
+	BUG_ON(!(wi_range <= ccb->q_depth));
+	while (cmpl->valid && packets < budget) {
+		packets++;
+		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
+
+		if (qid0 == cmpl->rxq_id)
+			rcb = ccb->rcb[0];
+		else
+			rcb = ccb->rcb[1];
+
+		unmap_q = rcb->unmap_q;
+
+		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		BUG_ON(!(skb));
+		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+		pci_unmap_single(bnad->pcidev,
+				 pci_unmap_addr(&unmap_q->
+						unmap_array[unmap_q->
+							    consumer_index],
+						dma_addr),
+						rcb->rxq->buffer_size,
+						PCI_DMA_FROMDEVICE);
+		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
+
+		/* Should be more efficient ? Performance ? */
+		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
+
+		wis++;
+		if (likely(--wi_range))
+			next_cmpl = cmpl + 1;
+		else {
+			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+			wis = 0;
+			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
+						next_cmpl, wi_range);
+			BUG_ON(!(wi_range <= ccb->q_depth));
+		}
+		prefetch(next_cmpl);
+
+		flags = ntohl(cmpl->flags);
+		if (unlikely
+		    (flags &
+		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
+		      BNA_CQ_EF_TOO_LONG))) {
+			dev_kfree_skb_any(skb);
+			rcb->rxq->rx_packets_with_error++;
+			goto next;
+		}
+
+		skb_put(skb, ntohs(cmpl->length));
+		if (likely
+		    (bnad->rx_csum &&
+			(((flags & BNA_CQ_EF_IPV4) &&
+			(flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
+			(flags & BNA_CQ_EF_IPV6)) &&
+			(flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
+			(flags & BNA_CQ_EF_L4_CKSUM_OK)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb_checksum_none_assert(skb);
+
+		rcb->rxq->rx_packets++;
+		rcb->rxq->rx_bytes += skb->len;
+		skb->protocol = eth_type_trans(skb, bnad->netdev);
+
+		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
+			struct bnad_rx_ctrl *rx_ctrl =
+				(struct bnad_rx_ctrl *)ccb->ctrl;
+			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
+						ntohs(cmpl->vlan_tag), skb);
+			else
+				vlan_hwaccel_receive_skb(skb,
+							 bnad->vlan_grp,
+							 ntohs(cmpl->vlan_tag));
+
+		} else { /* Not VLAN tagged/stripped */
+			struct bnad_rx_ctrl *rx_ctrl =
+				(struct bnad_rx_ctrl *)ccb->ctrl;
+			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
+				napi_gro_receive(&rx_ctrl->napi, skb);
+			else
+				netif_receive_skb(skb);
+		}
+
+next:
+		cmpl->valid = 0;
+		cmpl = next_cmpl;
+	}
+
+	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
+
+	if (likely(ccb)) {
+		bna_ib_ack(ccb->i_dbell, packets);
+		bnad_refill_rxq(bnad, ccb->rcb[0]);
+		if (ccb->rcb[1])
+			bnad_refill_rxq(bnad, ccb->rcb[1]);
+	} else
+		bna_ib_ack(ccb->i_dbell, 0);
+
+	return packets;
+}
+
+static void
+bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
+	bna_ib_ack(ccb->i_dbell, 0);
+}
+
+static void
+bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
+	bnad_enable_rx_irq_unsafe(ccb);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+}
+
+static void
+bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
+{
+	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
+	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+		bnad_disable_rx_irq(bnad, ccb);
+		__napi_schedule((&rx_ctrl->napi));
+	}
+	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
+}
+
+/* MSIX Rx Path Handler */
+static irqreturn_t
+bnad_msix_rx(int irq, void *data)
+{
+	struct bna_ccb *ccb = (struct bna_ccb *)data;
+	struct bnad *bnad = ccb->bnad;
+
+	bnad_netif_rx_schedule_poll(bnad, ccb);
+
+	return IRQ_HANDLED;
+}
+
+/* Interrupt handlers */
+
+/* Mbox Interrupt Handlers */
+static irqreturn_t
+bnad_msix_mbox_handler(int irq, void *data)
+{
+	u32 intr_status;
+	unsigned long flags;
+	struct net_device *netdev = data;
+	struct bnad *bnad;
+
+	bnad = netdev_priv(netdev);
+
+	/* BNA_ISR_GET(bnad); Inc Ref count */
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+
+	bna_intr_status_get(&bnad->bna, intr_status);
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
+		bna_mbox_handler(&bnad->bna, intr_status);
+
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	/* BNAD_ISR_PUT(bnad); Dec Ref count */
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnad_isr(int irq, void *data)
+{
+	int i, j;
+	u32 intr_status;
+	unsigned long flags;
+	struct net_device *netdev = data;
+	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad_rx_info *rx_info;
+	struct bnad_rx_ctrl *rx_ctrl;
+
+	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+		return IRQ_NONE;
+
+	bna_intr_status_get(&bnad->bna, intr_status);
+
+	if (unlikely(!intr_status))
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+
+	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+		bna_mbox_handler(&bnad->bna, intr_status);
+		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
+			spin_unlock_irqrestore(&bnad->bna_lock, flags);
+			goto done;
+		}
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	/* Process data interrupts */
+	for (i = 0; i < bnad->num_rx; i++) {
+		rx_info = &bnad->rx_info[i];
+		if (!rx_info->rx)
+			continue;
+		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
+			rx_ctrl = &rx_info->rx_ctrl[j];
+			if (rx_ctrl->ccb)
+				bnad_netif_rx_schedule_poll(bnad,
+							    rx_ctrl->ccb);
+		}
+	}
+done:
+	return IRQ_HANDLED;
+}
+
+/*
+ * Called in interrupt / callback context
+ * with bna_lock held, so cfg_flags access is OK
+ */
+static void
+bnad_enable_mbox_irq(struct bnad *bnad)
+{
+	int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+		if (bnad->cfg_flags & BNAD_CF_MSIX)
+			enable_irq(irq);
+
+	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
+}
+
+/*
+ * Called with bnad->bna_lock held b'cos of
+ * bnad->cfg_flags access.
+ */
+static void
+bnad_disable_mbox_irq(struct bnad *bnad)
+{
+	int irq = BNAD_GET_MBOX_IRQ(bnad);
+
+
+	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
+		if (bnad->cfg_flags & BNAD_CF_MSIX)
+			disable_irq_nosync(irq);
+
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
+
+/* Control Path Handlers */
+
+/* Callbacks */
+void
+bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
+{
+	bnad_enable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
+{
+	bnad_disable_mbox_irq(bnad);
+}
+
+void
+bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
+{
+	complete(&bnad->bnad_completions.ioc_comp);
+	bnad->bnad_completions.ioc_comp_status = status;
+}
+
+void
+bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
+{
+	complete(&bnad->bnad_completions.ioc_comp);
+	bnad->bnad_completions.ioc_comp_status = status;
+}
+
+static void
+bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	complete(&bnad->bnad_completions.port_comp);
+
+	netif_carrier_off(bnad->netdev);
+}
+
+void
+bnad_cb_port_link_status(struct bnad *bnad,
+			enum bna_link_status link_status)
+{
+	bool link_up = 0;
+
+	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
+
+	if (link_status == BNA_CEE_UP) {
+		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+		BNAD_UPDATE_CTR(bnad, cee_up);
+	} else
+		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
+
+	if (link_up) {
+		if (!netif_carrier_ok(bnad->netdev)) {
+			pr_warn("bna: %s link up\n",
+				bnad->netdev->name);
+			netif_carrier_on(bnad->netdev);
+			BNAD_UPDATE_CTR(bnad, link_toggle);
+			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+				/* Force an immediate Transmit Schedule */
+				pr_info("bna: %s TX_STARTED\n",
+					bnad->netdev->name);
+				netif_wake_queue(bnad->netdev);
+				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			} else {
+				netif_stop_queue(bnad->netdev);
+				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
+			}
+		}
+	} else {
+		if (netif_carrier_ok(bnad->netdev)) {
+			pr_warn("bna: %s link down\n",
+				bnad->netdev->name);
+			netif_carrier_off(bnad->netdev);
+			BNAD_UPDATE_CTR(bnad, link_toggle);
+		}
+	}
+}
+
+static void
+bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
+			enum bna_cb_status status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	complete(&bnad->bnad_completions.tx_comp);
+}
+
+static void
+bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	struct bnad_tx_info *tx_info =
+			(struct bnad_tx_info *)tcb->txq->tx->priv;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+	tx_info->tcb[tcb->id] = tcb;
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	struct bnad_tx_info *tx_info =
+			(struct bnad_tx_info *)tcb->txq->tx->priv;
+
+	tx_info->tcb[tcb->id] = NULL;
+}
+
+static void
+bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
+}
+
+static void
+bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
+{
+	struct bnad_rx_info *rx_info =
+			(struct bnad_rx_info *)ccb->cq->rx->priv;
+
+	rx_info->rx_ctrl[ccb->id].ccb = ccb;
+	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
+}
+
+static void
+bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
+{
+	struct bnad_rx_info *rx_info =
+			(struct bnad_rx_info *)ccb->cq->rx->priv;
+
+	rx_info->rx_ctrl[ccb->id].ccb = NULL;
+}
+
+static void
+bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	struct bnad_tx_info *tx_info =
+			(struct bnad_tx_info *)tcb->txq->tx->priv;
+
+	if (tx_info != &bnad->tx_info[0])
+		return;
+
+	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+	netif_stop_queue(bnad->netdev);
+	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
+}
+
+static void
+bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+		return;
+
+	if (netif_carrier_ok(bnad->netdev)) {
+		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+		netif_wake_queue(bnad->netdev);
+		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+	}
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	struct bnad_unmap_q *unmap_q;
+
+	if (!tcb || (!tcb->unmap_q))
+		return;
+
+	unmap_q = tcb->unmap_q;
+	if (!unmap_q->unmap_array)
+		return;
+
+	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		return;
+
+	bnad_free_all_txbufs(bnad, tcb);
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+}
+
+static void
+bnad_cb_rx_cleanup(struct bnad *bnad,
+			struct bna_ccb *ccb)
+{
+	bnad_cq_cmpl_init(bnad, ccb);
+
+	bnad_free_rxbufs(bnad, ccb->rcb[0]);
+	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
+
+	if (ccb->rcb[1]) {
+		bnad_free_rxbufs(bnad, ccb->rcb[1]);
+		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
+	}
+}
+
+static void
+bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
+
+	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
+
+	/* Now allocate & post buffers for this RCB */
+	/* !!Allocation in callback context */
+	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
+		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
+			>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
+			bnad_alloc_n_post_rxbufs(bnad, rcb);
+		smp_mb__before_clear_bit();
+		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
+	}
+}
+
+static void
+bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
+				enum bna_cb_status status)
+{
+	struct bnad *bnad = (struct bnad *)arg;
+
+	complete(&bnad->bnad_completions.rx_comp);
+}
+
+static void
+bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
+				enum bna_cb_status status)
+{
+	bnad->bnad_completions.mcast_comp_status = status;
+	complete(&bnad->bnad_completions.mcast_comp);
+}
+
+void
+bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
+		       struct bna_stats *stats)
+{
+	if (status == BNA_CB_SUCCESS)
+		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
+
+	if (!netif_running(bnad->netdev) ||
+		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
+		return;
+
+	mod_timer(&bnad->stats_timer,
+		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
+}
+
+/* Resource allocation, free functions */
+
+static void
+bnad_mem_free(struct bnad *bnad,
+	      struct bna_mem_info *mem_info)
+{
+	int i;
+	dma_addr_t dma_pa;
+
+	if (mem_info->mdl == NULL)
+		return;
+
+	for (i = 0; i < mem_info->num; i++) {
+		if (mem_info->mdl[i].kva != NULL) {
+			if (mem_info->mem_type == BNA_MEM_T_DMA) {
+				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
+						dma_pa);
+				pci_free_consistent(bnad->pcidev,
+						mem_info->mdl[i].len,
+						mem_info->mdl[i].kva, dma_pa);
+			} else
+				kfree(mem_info->mdl[i].kva);
+		}
+	}
+	kfree(mem_info->mdl);
+	mem_info->mdl = NULL;
+}
+
+static int
+bnad_mem_alloc(struct bnad *bnad,
+	       struct bna_mem_info *mem_info)
+{
+	int i;
+	dma_addr_t dma_pa;
+
+	if ((mem_info->num == 0) || (mem_info->len == 0)) {
+		mem_info->mdl = NULL;
+		return 0;
+	}
+
+	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
+				GFP_KERNEL);
+	if (mem_info->mdl == NULL)
+		return -ENOMEM;
+
+	if (mem_info->mem_type == BNA_MEM_T_DMA) {
+		for (i = 0; i < mem_info->num; i++) {
+			mem_info->mdl[i].len = mem_info->len;
+			mem_info->mdl[i].kva =
+				pci_alloc_consistent(bnad->pcidev,
+						mem_info->len, &dma_pa);
+
+			if (mem_info->mdl[i].kva == NULL)
+				goto err_return;
+
+			BNA_SET_DMA_ADDR(dma_pa,
+					 &(mem_info->mdl[i].dma));
+		}
+	} else {
+		for (i = 0; i < mem_info->num; i++) {
+			mem_info->mdl[i].len = mem_info->len;
+			mem_info->mdl[i].kva = kzalloc(mem_info->len,
+							GFP_KERNEL);
+			if (mem_info->mdl[i].kva == NULL)
+				goto err_return;
+		}
+	}
+
+	return 0;
+
+err_return:
+	bnad_mem_free(bnad, mem_info);
+	return -ENOMEM;
+}
+
+/* Free IRQ for Mailbox */
+static void
+bnad_mbox_irq_free(struct bnad *bnad,
+		   struct bna_intr_info *intr_info)
+{
+	int irq;
+	unsigned long flags;
+
+	if (intr_info->idl == NULL)
+		return;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	bnad_disable_mbox_irq(bnad);
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	irq = BNAD_GET_MBOX_IRQ(bnad);
+	free_irq(irq, bnad->netdev);
+
+	kfree(intr_info->idl);
+}
+
+/*
+ * Allocates IRQ for Mailbox, but keep it disabled
+ * This will be enabled once we get the mbox enable callback
+ * from bna
+ */
+static int
+bnad_mbox_irq_alloc(struct bnad *bnad,
+		    struct bna_intr_info *intr_info)
+{
+	int err;
+	unsigned long flags;
+	u32 irq;
+	irq_handler_t irq_handler;
+
+	/* Mbox should use only 1 vector */
+
+	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
+	if (!intr_info->idl)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	if (bnad->cfg_flags & BNAD_CF_MSIX) {
+		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
+		irq = bnad->msix_table[bnad->msix_num - 1].vector;
+		flags = 0;
+		intr_info->intr_type = BNA_INTR_T_MSIX;
+		intr_info->idl[0].vector = bnad->msix_num - 1;
+	} else {
+		irq_handler = (irq_handler_t)bnad_isr;
+		irq = bnad->pcidev->irq;
+		flags = IRQF_SHARED;
+		intr_info->intr_type = BNA_INTR_T_INTX;
+		/* intr_info->idl.vector = 0 ? */
+	}
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
+
+	/*
+	 * Set the Mbox IRQ disable flag, so that the IRQ handler
+	 * called from request_irq() for SHARED IRQs do not execute
+	 */
+	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
+	err = request_irq(irq, irq_handler, flags,
+			  bnad->mbox_irq_name, bnad->netdev);
+
+	if (err) {
+		kfree(intr_info->idl);
+		intr_info->idl = NULL;
+		return err;
+	}
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+
+	if (bnad->cfg_flags & BNAD_CF_MSIX)
+		disable_irq_nosync(irq);
+
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+	return 0;
+}
+
+static void
+bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
+{
+	kfree(intr_info->idl);
+	intr_info->idl = NULL;
+}
+
+/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
+static int
+bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
+		    uint txrx_id, struct bna_intr_info *intr_info)
+{
+	int i, vector_start = 0;
+	u32 cfg_flags;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bnad->bna_lock, flags);
+	cfg_flags = bnad->cfg_flags;
+	spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+	if (cfg_flags & BNAD_CF_MSIX) {
+		intr_info->intr_type = BNA_INTR_T_MSIX;
+		intr_info->idl = kcalloc(intr_info->num,
+					sizeof(struct bna_intr_descr),
+					GFP_KERNEL);
+		if (!intr_info->idl)
+			return -ENOMEM;
+
+		switch (src) {
+		case BNAD_INTR_TX:
+			vector_start = txrx_id;
+			break;
+
+		case BNAD_INTR_RX:
+			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
+					txrx_id;
+			break;
+
+		default:
+			BUG();
+		}
+
+		for (i = 0; i < intr_info->num; i++)
+			intr_info->idl[i].vector = vector_start + i;
+	} else {
+		intr_info->intr_type = BNA_INTR_T_INTX;
+		intr_info->num = 1;
+		intr_info->idl = kcalloc(intr_info->num,
+					sizeof(struct bna_intr_descr),
+					GFP_KERNEL);
+		if (!intr_info->idl)
+			return -ENOMEM;
+
+		switch (src) {
+		case BNAD_INTR_TX:
+			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
+			break;
+
+		case BNAD_INTR_RX:
+			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * NOTE: Should be called for MSIX only
+ * Unregisters Tx MSIX vector(s) from the kernel
+ */
+static void
+bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
+			int num_txqs)
+{
+	int i;
+	int vector_num;
+
+	for (i = 0; i < num_txqs; i++) {