Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--  drivers/net/bnx2x_main.c  2326
1 files changed, 1469 insertions, 857 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index c36a5f33739..20f0ed956df 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -10,7 +10,7 @@
  * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
- * Slowpath rework by Vladislav Zolotarov
+ * Slowpath and fastpath rework by Vladislav Zolotarov
  * Statistics and Link management by Yitchak Gertner
  *
  */
@@ -56,15 +56,15 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.48.105-1"
-#define DRV_MODULE_RELDATE	"2009/04/22"
+#define DRV_MODULE_VERSION	"1.52.1"
+#define DRV_MODULE_RELDATE	"2009/08/12"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
 #include "bnx2x_fw_file_hdr.h"
 /* FW files */
-#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
-#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
+#define FW_FILE_PREFIX_E1		"bnx2x-e1-"
+#define FW_FILE_PREFIX_E1H		"bnx2x-e1h-"
 
 /* Time in jiffies before concluding the transmitter is hung */
 #define TX_TIMEOUT		(5*HZ)
@@ -80,7 +80,18 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 static int multi_mode = 1;
 module_param(multi_mode, int, 0);
-MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
+MODULE_PARM_DESC(multi_mode, " Multi queue mode "
+			     "(0 Disable; 1 Enable (default))");
+
+static int num_rx_queues;
+module_param(num_rx_queues, int, 0);
+MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
+				" (default is half number of CPUs)");
+
+static int num_tx_queues;
+module_param(num_tx_queues, int, 0);
+MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
+				" (default is half number of CPUs)");
 
 static int disable_tpa;
 module_param(disable_tpa, int, 0);
@@ -90,6 +101,10 @@ static int int_mode;
 module_param(int_mode, int, 0);
 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
 
+static int dropless_fc;
+module_param(dropless_fc, int, 0);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
 static int poll;
 module_param(poll, int, 0);
 MODULE_PARM_DESC(poll, " Use polling (for debug)");
@@ -123,12 +138,9 @@ static struct {
 
 static const struct pci_device_id bnx2x_pci_tbl[] = {
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
-	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
 	{ 0 }
 };
 
@@ -141,7 +153,7 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 /* used only at init
  * locking is done by mcp
  */
-static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
 {
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -188,7 +200,7 @@ static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		      u32 len32)
 {
-	struct dmae_command *dmae = &bp->init_dmae;
+	struct dmae_command dmae;
 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
 	int cnt = 200;
 
@@ -201,43 +213,43 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		return;
 	}
 
-	mutex_lock(&bp->dmae_mutex);
-
-	memset(dmae, 0, sizeof(struct dmae_command));
+	memset(&dmae, 0, sizeof(struct dmae_command));
 
-	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
-			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
+		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
 #ifdef __BIG_ENDIAN
-			DMAE_CMD_ENDIANITY_B_DW_SWAP |
+		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
 #else
-			DMAE_CMD_ENDIANITY_DW_SWAP |
+		       DMAE_CMD_ENDIANITY_DW_SWAP |
 #endif
-			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae->src_addr_lo = U64_LO(dma_addr);
-	dmae->src_addr_hi = U64_HI(dma_addr);
-	dmae->dst_addr_lo = dst_addr >> 2;
-	dmae->dst_addr_hi = 0;
-	dmae->len = len32;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
+		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae.src_addr_lo = U64_LO(dma_addr);
+	dmae.src_addr_hi = U64_HI(dma_addr);
+	dmae.dst_addr_lo = dst_addr >> 2;
+	dmae.dst_addr_hi = 0;
+	dmae.len = len32;
+	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_val = DMAE_COMP_VAL;
 
 	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
 	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
 	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
-	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
+	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
+	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
+	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
 	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
 	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
 	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
+	mutex_lock(&bp->dmae_mutex);
+
 	*wb_comp = 0;
 
-	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
 
 	udelay(5);
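The two hunks above move the DMAE command off the shared bp->init_dmae and onto the caller's stack, which lets dmae_mutex shrink: the command is now assembled with no lock held, and the mutex only guards arming the completion word, posting the command and polling for it. A compilable sketch of this build-outside-the-lock pattern follows; every name in it (hw_cmd, hw_post, hw_lock, completion) is invented for illustration and is not the driver's API:

#include <string.h>
#include <pthread.h>

struct hw_cmd { unsigned opcode, len; };

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile unsigned completion;	/* stands in for *wb_comp */

void hw_post(struct hw_cmd *cmd);	/* stands in for bnx2x_post_dmae() */

void hw_write(unsigned opcode, unsigned len)
{
	struct hw_cmd cmd;

	/* private to this caller, so no lock is needed while building */
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = opcode;
	cmd.len = len;

	/* the critical section shrinks to arm + post + poll */
	pthread_mutex_lock(&hw_lock);
	completion = 0;
	hw_post(&cmd);
	while (completion == 0)
		;	/* the real code bounds this wait and sleeps */
	pthread_mutex_unlock(&hw_lock);
}

Concurrent callers now serialize only on the hardware channel itself, not on assembling the command.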
@@ -261,7 +273,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 {
-	struct dmae_command *dmae = &bp->init_dmae;
+	struct dmae_command dmae;
 	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
 	int cnt = 200;
 
@@ -276,41 +288,41 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 		return;
 	}
 
-	mutex_lock(&bp->dmae_mutex);
+	memset(&dmae, 0, sizeof(struct dmae_command));
 
-	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
-	memset(dmae, 0, sizeof(struct dmae_command));
-
-	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
-			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
-			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
+	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
+		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
+		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
 #ifdef __BIG_ENDIAN
-			DMAE_CMD_ENDIANITY_B_DW_SWAP |
+		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
 #else
-			DMAE_CMD_ENDIANITY_DW_SWAP |
+		       DMAE_CMD_ENDIANITY_DW_SWAP |
 #endif
-			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
-			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
-	dmae->src_addr_lo = src_addr >> 2;
-	dmae->src_addr_hi = 0;
-	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
-	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
-	dmae->len = len32;
-	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
-	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
-	dmae->comp_val = DMAE_COMP_VAL;
+		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
+		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
+	dmae.src_addr_lo = src_addr >> 2;
+	dmae.src_addr_hi = 0;
+	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+	dmae.len = len32;
+	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae.comp_val = DMAE_COMP_VAL;
 
 	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
 	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
 	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
-	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
-	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
-	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
+	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
+	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
+	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
+
+	mutex_lock(&bp->dmae_mutex);
+
+	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
 	*wb_comp = 0;
 
-	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
 
 	udelay(5);
@@ -334,6 +346,21 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 	mutex_unlock(&bp->dmae_mutex);
 }
 
+void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+			       u32 addr, u32 len)
+{
+	int offset = 0;
+
+	while (len > DMAE_LEN32_WR_MAX) {
+		bnx2x_write_dmae(bp, phys_addr + offset,
+				 addr + offset, DMAE_LEN32_WR_MAX);
+		offset += DMAE_LEN32_WR_MAX * 4;
+		len -= DMAE_LEN32_WR_MAX;
+	}
+
+	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
 /* used only for slowpath so not inlined */
 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
 {
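The new bnx2x_write_dmae_phys_len() above splits one large transfer into DMAE_LEN32_WR_MAX-sized writes. The unit mismatch is the subtle part: len counts 32-bit words while the addresses advance in bytes, hence offset grows by DMAE_LEN32_WR_MAX * 4 per chunk. A standalone demonstration of the same arithmetic (the chunk size 0x400 is a placeholder, not the driver's actual constant):

#include <stdio.h>

#define CHUNK_DWORDS 0x400u	/* placeholder for DMAE_LEN32_WR_MAX */

static void chunked_write(unsigned long phys, unsigned grc, unsigned len_dw)
{
	unsigned offset = 0;	/* byte offset into both regions */

	while (len_dw > CHUNK_DWORDS) {
		printf("write %u dwords: src %#lx dst %#x\n",
		       CHUNK_DWORDS, phys + offset, grc + offset);
		offset += CHUNK_DWORDS * 4;	/* dwords -> bytes */
		len_dw -= CHUNK_DWORDS;
	}
	printf("write %u dwords: src %#lx dst %#x\n",
	       len_dw, phys + offset, grc + offset);
}

int main(void)
{
	/* 0x900 dwords -> three writes: 0x400 + 0x400 + 0x100 */
	chunked_write(0x1000, 0x8000, 0x900);
	return 0;
}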
@@ -542,16 +569,15 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	/* Tx */
 	for_each_tx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
 
 		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
 			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
 		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
-			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
+			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
 			  fp->status_blk->c_status_block.status_block_index,
-			  hw_prods->packets_prod, hw_prods->bds_prod);
+			  fp->tx_db.data.prod);
 	}
 
 /* Rings */
@@ -653,6 +679,11 @@ static void bnx2x_int_enable(struct bnx2x *bp)
 	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 
 	REG_WR(bp, addr, val);
+	/*
+	 * Ensure that HC_CONFIG is written before leading/trailing edge config
+	 */
+	mmiowb();
+	barrier();
 
 	if (CHIP_IS_E1H(bp)) {
 		/* init leading/trailing edge */
@@ -667,6 +698,9 @@ static void bnx2x_int_enable(struct bnx2x *bp)
 		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
 		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
 	}
+
+	/* Make sure that interrupts are indeed enabled from here on */
+	mmiowb();
 }
 
 static void bnx2x_int_disable(struct bnx2x *bp)
@@ -689,7 +723,6 @@ static void bnx2x_int_disable(struct bnx2x *bp)
 	REG_WR(bp, addr, val);
 	if (REG_RD(bp, addr) != val)
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
-
 }
 
 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
@@ -699,6 +732,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 
 	/* disable interrupt handling */
 	atomic_inc(&bp->intr_sem);
+	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
+
 	if (disable_hw)
 		/* prevent the HW from sending interrupts */
 		bnx2x_int_disable(bp);
@@ -740,6 +775,10 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
 	   (*(u32 *)&igu_ack), hc_addr);
 	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+	/* Make sure that ACK is written */
+	mmiowb();
+	barrier();
 }
 
 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
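Several hunks above insert mmiowb()/barrier() pairs after REG_WR() calls: MMIO writes are posted, so without an ordering primitive the HC configuration write could still be in flight when the edge registers are programmed, and the IGU ACK could reach the device later than intended. The shape of the idiom, reduced to a compilable fragment; the register offsets and names are invented, and the compiler barrier merely stands in for the kernel's mmiowb()/barrier(), which are the primitives actually required on real hardware:

/* Ordering two device-register writes; illustrative only. */
#define BARRIER() __asm__ __volatile__("" ::: "memory")

static volatile unsigned int *regs;	/* assume ioremap()ed elsewhere */

static void hc_enable(unsigned int config, unsigned int edge)
{
	regs[4] = config;	/* configuration must land first... */
	BARRIER();		/* ...so fence before touching the edges */
	regs[5] = edge;		/* leading/trailing edge registers */
}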
"MSI" : "INTx"))); REG_WR(bp, addr, val); + /* + * Ensure that HC_CONFIG is written before leading/trailing edge config + */ + mmiowb(); + barrier(); if (CHIP_IS_E1H(bp)) { /* init leading/trailing edge */ @@ -667,6 +698,9 @@ static void bnx2x_int_enable(struct bnx2x *bp) REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); } + + /* Make sure that interrupts are indeed enabled from here on */ + mmiowb(); } static void bnx2x_int_disable(struct bnx2x *bp) @@ -689,7 +723,6 @@ static void bnx2x_int_disable(struct bnx2x *bp) REG_WR(bp, addr, val); if (REG_RD(bp, addr) != val) BNX2X_ERR("BUG! proper val not read from IGU!\n"); - } static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) @@ -699,6 +732,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) /* disable interrupt handling */ atomic_inc(&bp->intr_sem); + smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ + if (disable_hw) /* prevent the HW from sending interrupts */ bnx2x_int_disable(bp); @@ -740,6 +775,10 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n", (*(u32 *)&igu_ack), hc_addr); REG_WR(bp, hc_addr, (*(u32 *)&igu_ack)); + + /* Make sure that ACK is written */ + mmiowb(); + barrier(); } static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) @@ -776,16 +815,6 @@ static u16 bnx2x_ack_int(struct bnx2x *bp) * fast path service functions */ -static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp) -{ - u16 tx_cons_sb; - - /* Tell compiler that status block fields can change */ - barrier(); - tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb); - return (fp->tx_pkt_cons != tx_cons_sb); -} - static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) { /* Tell compiler that consumer and producer can change */ @@ -800,7 +829,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, u16 idx) { struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; - struct eth_tx_bd *tx_bd; + struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_bd *tx_data_bd; struct sk_buff *skb = tx_buf->skb; u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; @@ -810,51 +840,46 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* unmap first bd */ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); - tx_bd = &fp->tx_desc_ring[bd_idx]; - pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd), - BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE); + tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; + pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); - nbd = le16_to_cpu(tx_bd->nbd) - 1; - new_cons = nbd + tx_buf->first_bd; + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR - if (nbd > (MAX_SKB_FRAGS + 2)) { + if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { BNX2X_ERR("BAD nbd!\n"); bnx2x_panic(); } #endif + new_cons = nbd + tx_buf->first_bd; - /* Skip a parse bd and the TSO split header bd - since they have no mapping */ - if (nbd) - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + /* Get the next bd */ + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM | - ETH_TX_BD_FLAGS_TCP_CSUM | - ETH_TX_BD_FLAGS_SW_LSO)) { - if (--nbd) - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - tx_bd = &fp->tx_desc_ring[bd_idx]; - /* is this a TSO split header bd? */ - if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) { - if (--nbd) - bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - } + /* Skip a parse bd... 
@@ -896,7 +921,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 		return;
 #endif
 
-	txq = netdev_get_tx_queue(bp->dev, fp->index);
+	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
 	sw_cons = fp->tx_pkt_cons;
 
@@ -926,8 +951,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 	/* TBD need a thresh? */
 	if (unlikely(netif_tx_queue_stopped(txq))) {
 
-		__netif_tx_lock(txq, smp_processor_id());
-
 		/* Need to make the tx_bd_cons update visible to start_xmit()
 		 * before checking for netif_tx_queue_stopped().  Without the
 		 * memory barrier, there is a small possibility that
@@ -940,8 +963,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
 			netif_tx_wake_queue(txq);
-
-		__netif_tx_unlock(txq);
 	}
 }
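The two hunks above drop the __netif_tx_lock()/__netif_tx_unlock() pair around the queue-wake check; the comment kept in the context explains what still protects it: the tx_bd_cons update must be made visible before netif_tx_queue_stopped() is re-checked, or a concurrent start_xmit() could stop the queue just after this side decided there was nothing to wake. The classic two-sided barrier pattern, sketched with C11 atomics (names invented):

#include <stdatomic.h>
#include <stdbool.h>

atomic_int cons;	/* consumer index, advanced by Tx completion */
atomic_bool stopped;	/* queue state, shared with the submit path */

bool ring_full(void);
void wake_queue(void);

/* completion side (bnx2x_tx_int() in the driver) */
void tx_complete(int new_cons)
{
	atomic_store(&cons, new_cons);
	atomic_thread_fence(memory_order_seq_cst);	/* publish cons first */
	if (atomic_load(&stopped) && !ring_full())
		wake_queue();
}

/* submit side (start_xmit()): stop, fence, then re-check */
void tx_submit_slowpath(void)
{
	atomic_store(&stopped, true);
	atomic_thread_fence(memory_order_seq_cst);
	if (!ring_full())	/* completion may have already freed space */
		wake_queue();
}

Each side writes its variable, fences, then reads the other side's variable, so at least one of them observes the final state and the wake-up cannot be lost.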
@@ -1009,6 +1030,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 		break;
 
 	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
+	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
 		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
 		break;
 
@@ -1491,6 +1513,13 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
 
+		/* Prefetch the page containing the BD descriptor
+		   at producer's index. It will be needed when new skb is
+		   allocated */
+		prefetch((void *)(PAGE_ALIGN((unsigned long)
+					     (&fp->rx_desc_ring[bd_prod])) -
+				  PAGE_SIZE + 1));
+
 		cqe = &fp->rx_comp_ring[comp_ring_cons];
 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
 
@@ -1599,7 +1628,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 				skb = new_skb;
 
-			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
+			} else
+			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				pci_unmap_single(bp->pdev,
 					pci_unmap_addr(rx_buf, mapping),
 						 bp->rx_buf_size,
@@ -1629,6 +1659,7 @@ reuse_rx:
 		}
 
 		skb_record_rx_queue(skb, fp->index);
+
 #ifdef BCM_VLAN
 		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
@@ -1674,7 +1705,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 {
 	struct bnx2x_fastpath *fp = fp_cookie;
 	struct bnx2x *bp = fp->bp;
-	int index = fp->index;
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
@@ -1683,20 +1713,34 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 	}
 
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
-	   index, fp->sb_id);
+	   fp->index, fp->sb_id);
 	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return IRQ_HANDLED;
 #endif
+	/* Handle Rx or Tx according to MSI-X vector */
+	if (fp->is_rx_queue) {
+		prefetch(fp->rx_cons_sb);
+		prefetch(&fp->status_blk->u_status_block.status_block_index);
 
-	prefetch(fp->rx_cons_sb);
-	prefetch(fp->tx_cons_sb);
-	prefetch(&fp->status_blk->c_status_block.status_block_index);
-	prefetch(&fp->status_blk->u_status_block.status_block_index);
+		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
-	napi_schedule(&bnx2x_fp(bp, index, napi));
+	} else {
+		prefetch(fp->tx_cons_sb);
+		prefetch(&fp->status_blk->c_status_block.status_block_index);
+
+		bnx2x_update_fpsb_idx(fp);
+		rmb();
+		bnx2x_tx_int(fp);
+
+		/* Re-enable interrupts */
+		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
+		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
+	}
 
 	return IRQ_HANDLED;
 }
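From this point on each MSI-X vector serves either an Rx queue or a Tx queue, not both: an Rx vector only schedules NAPI, while a Tx vector reclaims completed descriptors directly in hard-irq context and then re-arms its status block itself, work that a NAPI poll would otherwise do. The dispatch reduced to a schematic, with every type and helper invented for illustration:

struct queue {
	int is_rx;
	/* ... */
};

void napi_kick(struct queue *q);	/* defer Rx work to softirq */
void tx_clean(struct queue *q);		/* reclaim completed Tx BDs */
void irq_reenable(struct queue *q);	/* ack + unmask this vector */

int vector_handler(struct queue *q)
{
	if (q->is_rx) {
		napi_kick(q);		/* heavy lifting happens later */
	} else {
		tx_clean(q);		/* cheap enough for irq context */
		irq_reenable(q);	/* no NAPI poll will do it for us */
	}
	return 1;			/* handled */
}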
@@ -1706,6 +1750,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	struct bnx2x *bp = netdev_priv(dev_instance);
 	u16 status = bnx2x_ack_int(bp);
 	u16 mask;
+	int i;
 
 	/* Return here if interrupt is shared and it's not for us */
 	if (unlikely(status == 0)) {
@@ -1725,18 +1770,38 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 		return IRQ_HANDLED;
 #endif
 
-	mask = 0x2 << bp->fp[0].sb_id;
-	if (status & mask) {
-		struct bnx2x_fastpath *fp = &bp->fp[0];
+	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		prefetch(fp->rx_cons_sb);
-		prefetch(fp->tx_cons_sb);
-		prefetch(&fp->status_blk->c_status_block.status_block_index);
-		prefetch(&fp->status_blk->u_status_block.status_block_index);
+		mask = 0x2 << fp->sb_id;
+		if (status & mask) {
+			/* Handle Rx or Tx according to SB id */
+			if (fp->is_rx_queue) {
+				prefetch(fp->rx_cons_sb);
+				prefetch(&fp->status_blk->u_status_block.
+							status_block_index);
 
-		napi_schedule(&bnx2x_fp(bp, 0, napi));
+				napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 
-		status &= ~mask;
+			} else {
+				prefetch(fp->tx_cons_sb);
+				prefetch(&fp->status_blk->c_status_block.
+							status_block_index);
+
+				bnx2x_update_fpsb_idx(fp);
+				rmb();
+				bnx2x_tx_int(fp);
+
+				/* Re-enable interrupts */
+				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_u_idx),
+					     IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
+					     le16_to_cpu(fp->fp_c_idx),
+					     IGU_INT_ENABLE, 1);
+			}
+			status &= ~mask;
+		}
 	}
@@ -2063,6 +2128,12 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
 
 static void bnx2x_link_report(struct bnx2x *bp)
 {
+	if (bp->state == BNX2X_STATE_DISABLED) {
+		netif_carrier_off(bp->dev);
+		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+		return;
+	}
+
 	if (bp->link_vars.link_up) {
 		if (bp->state == BNX2X_STATE_OPEN)
 			netif_carrier_on(bp->dev);
@@ -2102,9 +2173,7 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 		/* Initialize link parameters structure variables */
 		/* It is recommended to turn off RX FC for jumbo frames
 		   for better performance */
-		if (IS_E1HMF(bp))
-			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
-		else if (bp->dev->mtu > 5000)
+		if (bp->dev->mtu > 5000)
 			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
 		else
 			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
@@ -2199,6 +2268,46 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
 	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
 }
 
+/* Calculates the sum of vn_min_rates.
+   It's needed for further normalizing of the min_rates.
+   Returns:
+     sum of vn_min_rates.
+       or
+     0 - if all the min_rates are 0.
+   In the latter case the fairness algorithm should be deactivated.
+   If not all min_rates are zero then those that are zeroes will be set to 1.
+ */
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+{
+	int all_zero = 1;
+	int port = BP_PORT(bp);
+	int vn;
+
+	bp->vn_weight_sum = 0;
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		int func = 2*vn + port;
+		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+		/* Skip hidden vns */
+		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+			continue;
+
+		/* If min rate is zero - set it to 1 */
+		if (!vn_min_rate)
+			vn_min_rate = DEF_MIN_RATE;
+		else
+			all_zero = 0;
+
+		bp->vn_weight_sum += vn_min_rate;
+	}
+
+	/* ... only if all min rates are zeros - disable fairness */
+	if (all_zero)
+		bp->vn_weight_sum = 0;
+}
+
 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
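bnx2x_calc_vn_weight_sum() above converts each virtual NIC's configured minimum bandwidth into a weight (the config field times 100), substitutes DEF_MIN_RATE for configured zeroes so every VN keeps a nonzero share, and disables the fairness algorithm only when every rate was zero. The same normalization as a standalone worked example (DEF_MIN_RATE assumed to be 100 here):

#include <stdio.h>

#define DEF_MIN_RATE 100	/* assumed value for the example */

int main(void)
{
	/* per-VN configured min rates; hidden VNs already skipped */
	int min_rate[4] = { 20, 0, 30, 0 };
	int sum = 0, all_zero = 1, vn;

	for (vn = 0; vn < 4; vn++) {
		int weight = min_rate[vn] * 100;	/* config -> weight */

		if (!weight)
			weight = DEF_MIN_RATE;	/* zero still gets a share */
		else
			all_zero = 0;
		sum += weight;
	}
	if (all_zero)
		sum = 0;	/* nothing configured: fairness disabled */

	/* 2000 + 100 + 3000 + 100 = 5200; each VN's guaranteed share is
	   its weight divided by this sum */
	printf("vn_weight_sum = %d\n", sum);
	return 0;
}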
@@ -2276,7 +2385,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 	if (bp->link_vars.link_up) {
 
 		/* dropless flow control */
-		if (CHIP_IS_E1H(bp)) {
+		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
 			int port = BP_PORT(bp);
 			u32 pause_enabled = 0;
 
@@ -2284,7 +2393,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 				pause_enabled = 1;
 
 			REG_WR(bp, BAR_USTRORM_INTMEM +
-			       USTORM_PAUSE_ENABLED_OFFSET(port),
+			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
 			       pause_enabled);
 		}
 
@@ -2309,14 +2418,12 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 		int func;
 		int vn;
 
+		/* Set the attention towards other drivers on the same port */
 		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
 			if (vn == BP_E1HVN(bp))
 				continue;
 
 			func = ((vn << 1) | port);
-
-			/* Set the attention towards other drivers
-			   on the same port */
 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
 			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
 		}
@@ -2342,6 +2449,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 
 static void bnx2x__link_status_update(struct bnx2x *bp)
 {
+	int func = BP_FUNC(bp);
+
 	if (bp->state != BNX2X_STATE_OPEN)
 		return;
 
@@ -2352,6 +2461,9 @@ static void bnx2x__link_status_update(struct bnx2x *bp)
 	else
 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
+	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+	bnx2x_calc_vn_weight_sum(bp);
+
 	/* indicate link status */
 	bnx2x_link_report(bp);
 }
@@ -2380,6 +2492,152 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
  * General service functions
  */
 
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
+{
+	int func = BP_FUNC(bp);
+	u32 seq = ++bp->fw_seq;
+	u32 rc = 0;
+	u32 cnt = 1;
+	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
+	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
+
+	do {
+		/* let the FW do its magic ... */
+		msleep(delay);
+
+		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
+
+		/* Give the FW up to 2 seconds (200*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
+
+	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+	   cnt*delay, rc, seq);
+
+	/* is this a reply to our command? */
+	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+		rc &= FW_MSG_CODE_MASK;
+	else {
+		/* FW BUG! */
+		BNX2X_ERR("FW failed to respond!\n");
+		bnx2x_fw_dump(bp);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
+static void bnx2x_set_rx_mode(struct net_device *dev);
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int i;
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+	bnx2x_set_storm_rx_mode(bp);
+
+	netif_tx_disable(bp->dev);
+	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+	bnx2x_set_mac_addr_e1h(bp, 0);
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+
+	netif_carrier_off(bp->dev);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+
+	bnx2x_set_mac_addr_e1h(bp, 1);
+
+	/* Tx queue should be only re-enabled */
+	netif_tx_wake_all_queues(bp->dev);
+
+	/* Initialize the receive filter. */
+	bnx2x_set_rx_mode(bp->dev);
+}
+
+static void bnx2x_update_min_max(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int vn, i;
+
+	/* Init rate shaping and fairness contexts */
+	bnx2x_init_port_minmax(bp);
+
+	bnx2x_calc_vn_weight_sum(bp);
+
+	for (vn = VN_0; vn < E1HVN_MAX; vn++)
+		bnx2x_init_vn_minmax(bp, 2*vn + port);
+
+	if (bp->port.pmf) {
+		int func;
+
+		/* Set the attention towards other drivers on the same port */
+		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+			if (vn == BP_E1HVN(bp))
+				continue;
+
+			func = ((vn << 1) | port);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+		}
+
+		/* Store it to internal memory */
+		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
+			REG_WR(bp, BAR_XSTRORM_INTMEM +
+			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
+			       ((u32 *)(&bp->cmng))[i]);
+	}
+}
+
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+{
+	int func = BP_FUNC(bp);
+
+	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
+
+	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
+			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+			bp->state = BNX2X_STATE_DISABLED;
+
+			bnx2x_e1h_disable(bp);
+		} else {
+			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+			bp->state = BNX2X_STATE_OPEN;
+
+			bnx2x_e1h_enable(bp);
+		}
+		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+	}
+	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+
+		bnx2x_update_min_max(bp);
+		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+	}
+
+	/* Report results to MCP */
+	if (dcc_event)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
+	else
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
+}
+
 /* the slow path queue is odd since completions arrive on the fastpath ring */
 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 			 u32 data_hi, u32 data_lo, int common)
@@ -2430,9 +2688,14 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		bp->spq_prod_idx++;
 	}
 
+	/* Make sure that BD data is updated before writing the producer */
+	wmb();
+
 	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
 	       bp->spq_prod_idx);
 
+	mmiowb();
+
 	spin_unlock_bh(&bp->spq_lock);
 	return 0;
 }
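bnx2x_sp_post() above gains the canonical ring-publish sequence: fill the BD, wmb() so the BD contents are globally visible before the new producer value, write the producer, and mmiowb() before the spin-lock is dropped so the doorbell write cannot be reordered against what the next lock holder does. A generic compilable sketch of steps 1-3 (types invented; the C11 release fence models the kernel's wmb()):

#include <stdatomic.h>

struct bd { unsigned cmd, data; };

struct ring {
	struct bd *bds;
	unsigned prod;
	volatile unsigned *doorbell;	/* device-visible producer register */
};

void ring_post(struct ring *r, unsigned cmd, unsigned data)
{
	struct bd *bd = &r->bds[r->prod++];

	bd->cmd = cmd;		/* 1. fill the descriptor */
	bd->data = data;

	/* 2. wmb(): descriptor contents before the producer update */
	atomic_thread_fence(memory_order_release);

	*r->doorbell = r->prod;	/* 3. tell the device */

	/* 4. in the kernel an mmiowb() follows, keeping this MMIO write
	   ordered against the spin_unlock the caller does next */
}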
 	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
 			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -2616,36 +2896,32 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 
 		BNX2X_ERR("SPIO5 hw attention\n");
 
+		/* Fan failure attention */
 		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
 		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
-			/* Fan failure attention */
-
+			/* Low power mode is controlled by GPIO 2 */
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 			/* The PHY reset is controlled by GPIO 1 */
 			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			/* Low power mode is controlled by GPIO 2 */
-			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			break;
+
+		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+			/* The PHY reset is controlled by GPIO 1 */
+			/* fake the port number to cancel the swap done in
+			   set_gpio() */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+			port = (swap_val && swap_override) ^ 1;
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
-			/* mark the failure */
-			bp->link_params.ext_phy_config &=
-					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
-			bp->link_params.ext_phy_config |=
-					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
-			SHMEM_WR(bp,
-				 dev_info.port_hw_config[port].
-					external_phy_config,
-				 bp->link_params.ext_phy_config);
-			/* log the failure */
-			printk(KERN_ERR PFX "Fan Failure on Network"
-			       " Controller %s has caused the driver to"
-			       " shutdown the card to prevent permanent"
-			       " damage.  Please contact Dell Support for"
-			       " assistance\n", bp->dev->name);
 			break;
 
 		default:
 			break;
 		}
+		bnx2x_fan_failure(bp);
 	}
 
 	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
@@ -2662,7 +2938,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		REG_WR(bp, reg_offset, val);
 
 		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
-			  (attn & HW_INTERRUT_ASSERT_SET_0));
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
 		bnx2x_panic();
 	}
 }
@@ -2693,7 +2969,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
 		REG_WR(bp, reg_offset, val);
 
 		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
-			  (attn & HW_INTERRUT_ASSERT_SET_1));
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
 		bnx2x_panic();
 	}
 }
@@ -2733,7 +3009,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
 		REG_WR(bp, reg_offset, val);
 
 		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
-			  (attn & HW_INTERRUT_ASSERT_SET_2));
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
 		bnx2x_panic();
 	}
 }
@@ -2748,9 +3024,12 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 			int func = BP_FUNC(bp);
 
 			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			val = SHMEM_RD(bp, func_mb[func].drv_status);
+			if (val & DRV_STATUS_DCC_EVENT_MASK)
+				bnx2x_dcc_event(bp,
+					    (val & DRV_STATUS_DCC_EVENT_MASK));
 			bnx2x__link_status_update(bp);
-			if (SHMEM_RD(bp, func_mb[func].drv_status) &
-						DRV_STATUS_PMF)
+			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 				bnx2x_pmf_update(bp);
 
 		} else if (attn & BNX2X_MC_ASSERT_BITS) {
@@ -3109,53 +3388,6 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 	}
 }
 
-static void bnx2x_stats_init(struct bnx2x *bp)
-{
-	int port = BP_PORT(bp);
-	int i;
-
-	bp->stats_pending = 0;
-	bp->executer_idx = 0;
-	bp->stats_counter = 0;
-
-	/* port stats */
-	if (!BP_NOMCP(bp))
-		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
-	else
-		bp->port.port_stx = 0;
-	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
-
-	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
-	bp->port.old_nig_stats.brb_discard =
-			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
-	bp->port.old_nig_stats.brb_truncate =
-			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
-
-	/* function stats */
-	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-
-		memset(&fp->old_tclient, 0,
-		       sizeof(struct tstorm_per_client_stats));
-		memset(&fp->old_uclient, 0,
-		       sizeof(struct ustorm_per_client_stats));
-		memset(&fp->old_xclient, 0,
-		       sizeof(struct xstorm_per_client_stats));
-		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
-	}
-
-	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
-	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
-
-	bp->stats_state = STATS_STATE_DISABLED;
-	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
-		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
-}
-
 static void bnx2x_hw_stats_post(struct bnx2x *bp)
 {
 	struct dmae_command *dmae = &bp->stats_dmae;
@@ -3716,7 +3948,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 	struct bnx2x_eth_stats *estats = &bp->eth_stats;
 	int i;
 
-	memset(&(fstats->total_bytes_received_hi), 0,
+	memcpy(&(fstats->total_bytes_received_hi),
+	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
 	       sizeof(struct host_func_stats) - 2*sizeof(u32));
 	estats->error_bytes_received_hi = 0;
 	estats->error_bytes_received_lo = 0;
@@ -3725,7 +3958,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 	estats->no_buff_discard_hi = 0;
 	estats->no_buff_discard_lo = 0;
 
-	for_each_queue(bp, i) {
+	for_each_rx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 		int cl_id = fp->cl_id;
 		struct tstorm_per_ |