Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r-- | drivers/net/bnx2x_main.c | 2640
1 file changed, 1707 insertions, 933 deletions
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 2e346a5e98c..00a78e8677b 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -50,15 +50,13 @@
 #include <linux/zlib.h>
 #include <linux/io.h>
 
-#include "bnx2x_reg.h"
-#include "bnx2x_fw_defs.h"
-#include "bnx2x_hsi.h"
-#include "bnx2x_link.h"
+
 #include "bnx2x.h"
 #include "bnx2x_init.h"
+#include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.45.27"
-#define DRV_MODULE_RELDATE	"2009/01/26"
+#define DRV_MODULE_VERSION	"1.48.105"
+#define DRV_MODULE_RELDATE	"2009/03/02"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -73,26 +71,32 @@
 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
+
 static int disable_tpa;
-static int use_inta;
+module_param(disable_tpa, int, 0);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
+
 static int poll;
+module_param(poll, int, 0);
+MODULE_PARM_DESC(poll, " Use polling (for debug)");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
 static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
-static int use_multi;
 
-module_param(disable_tpa, int, 0);
-module_param(use_inta, int, 0);
-module_param(poll, int, 0);
-module_param(debug, int, 0);
-MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
-MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
-MODULE_PARM_DESC(poll, "use polling (for debug)");
-MODULE_PARM_DESC(debug, "default debug msglevel");
-
-#ifdef BNX2X_MULTI
-module_param(use_multi, int, 0);
-MODULE_PARM_DESC(use_multi, "use per-CPU queues");
-#endif
 
 static struct workqueue_struct *bnx2x_wq;
 
 enum bnx2x_board_type {
@@ -213,7 +217,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
 	dmae->comp_val = DMAE_COMP_VAL;
 
-	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
+	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
 	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
 		    "dst_addr [%x:%08x (%08x)]\n"
 	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
@@ -234,7 +238,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
 
 		if (!cnt) {
-			BNX2X_ERR("dmae timeout!\n");
+			BNX2X_ERR("DMAE timeout!\n");
 			break;
 		}
 		cnt--;
@@ -289,7 +293,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
 	dmae->comp_val = DMAE_COMP_VAL;
 
-	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
+	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
 	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
 		    "dst_addr [%x:%08x (%08x)]\n"
 	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
@@ -306,7 +310,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
 
 	while (*wb_comp != DMAE_COMP_VAL) {
 		if (!cnt) {
-			BNX2X_ERR("dmae timeout!\n");
+			BNX2X_ERR("DMAE timeout!\n");
 			break;
 		}
 		cnt--;
@@ -468,7 +472,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
 static void bnx2x_fw_dump(struct bnx2x *bp)
 {
 	u32 mark, offset;
-	u32 data[9];
+	__be32 data[9];
 	int word;
 
 	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
@@ -502,82 +506,103 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
 	BNX2X_ERR("begin crash dump -----------------\n");
 
-	for_each_queue(bp, i) {
+	/* Indices */
+	/* Common */
+	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
+		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
+		  "  spq_prod_idx(%u)\n",
+		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
+		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+
+	/* Rx */
+	for_each_rx_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
 
-		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
-			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
-			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
-			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
+		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
 			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
 			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
-			  fp->rx_bd_prod, fp->rx_bd_cons,
+			  i, fp->rx_bd_prod, fp->rx_bd_cons,
 			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
 			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
-			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
-			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
-			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
-			  fp->status_blk->c_status_block.status_block_index,
-			  fp->fp_u_idx,
-			  fp->status_blk->u_status_block.status_block_index,
-			  hw_prods->packets_prod, hw_prods->bds_prod);
-
-		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
-		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
-		for (j = start; j < end; j++) {
-			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
+		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
+			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge,
+			  le16_to_cpu(fp->fp_u_idx),
+			  fp->status_blk->u_status_block.status_block_index);
+	}
 
-			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
-				  sw_bd->skb, sw_bd->first_bd);
-		}
+	/* Tx */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
 
-		start = TX_BD(fp->tx_bd_cons - 10);
-		end = TX_BD(fp->tx_bd_cons + 254);
-		for (j = start; j < end; j++) {
-			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
+		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
+			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
+			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
+			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
+		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
+			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
+			  fp->status_blk->c_status_block.status_block_index,
+			  hw_prods->packets_prod, hw_prods->bds_prod);
+	}
 
-			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
-				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
-		}
+	/* Rings */
+	/* Rx */
+	for_each_rx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
 
 		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
 		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
-		for (j = start; j < end; j++) {
+		for (j = start; j != end; j = RX_BD(j + 1)) {
 			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
 			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 
-			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
-				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
+			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
+				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
 		}
 
 		start = RX_SGE(fp->rx_sge_prod);
 		end = RX_SGE(fp->last_max_sge);
-		for (j = start; j < end; j++) {
+		for (j = start; j != end; j = RX_SGE(j + 1)) {
 			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
 			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
 
-			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
-				  j, rx_sge[1], rx_sge[0], sw_page->page);
+			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
+				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
 		}
 
 		start = RCQ_BD(fp->rx_comp_cons - 10);
 		end = RCQ_BD(fp->rx_comp_cons + 503);
-		for (j = start; j < end; j++) {
+		for (j = start; j != end; j = RCQ_BD(j + 1)) {
 			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
 
-			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
-				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
+			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
 		}
 	}
 
-	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
-		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
-		  "  spq_prod_idx(%u)\n",
-		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
-		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
+	/* Tx */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
+		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
+		for (j = start; j != end; j = TX_BD(j + 1)) {
+			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
+
+			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
+				  i, j, sw_bd->skb, sw_bd->first_bd);
+		}
+
+		start = TX_BD(fp->tx_bd_cons - 10);
+		end = TX_BD(fp->tx_bd_cons + 254);
+		for (j = start; j != end; j = TX_BD(j + 1)) {
+			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
+
+			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
+				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
+		}
+	}
 
 	bnx2x_fw_dump(bp);
 	bnx2x_mc_assert(bp);
@@ -590,37 +615,44 @@ static void bnx2x_int_enable(struct bnx2x *bp)
 	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 	u32 val = REG_RD(bp, addr);
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
 
 	if (msix) {
-		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0);
 		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else if (msi) {
+		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 	} else {
 		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 			HC_CONFIG_0_REG_INT_LINE_EN_0 |
 			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 
-		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
-		   val, port, addr, msix);
+		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+		   val, port, addr);
 
 		REG_WR(bp, addr, val);
 
 		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
 	}
 
-	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
-	   val, port, addr, msix);
+	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
+	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
 
 	REG_WR(bp, addr, val);
 
 	if (CHIP_IS_E1H(bp)) {
 		/* init leading/trailing edge */
 		if (IS_E1HMF(bp)) {
-			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
+			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
 			if (bp->port.pmf)
-				/* enable nig attention */
-				val |= 0x0100;
+				/* enable nig and gpio3 attention */
+				val |= 0x1100;
 		} else
 			val = 0xffff;
@@ -643,15 +675,19 @@ static void bnx2x_int_disable(struct bnx2x *bp)
 	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
 	   val, port, addr);
 
+	/* flush all outstanding writes */
+	mmiowb();
+
 	REG_WR(bp, addr, val);
 	if (REG_RD(bp, addr) != val)
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
+
 }
 
 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 {
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
-	int i;
+	int i, offset;
 
 	/* disable interrupt handling */
 	atomic_inc(&bp->intr_sem);
@@ -661,11 +697,10 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 
 	/* make sure all ISRs are done */
 	if (msix) {
+		synchronize_irq(bp->msix_table[0].vector);
+		offset = 1;
 		for_each_queue(bp, i)
-			synchronize_irq(bp->msix_table[i].vector);
-
-		/* one more for the Slow Path IRQ */
-		synchronize_irq(bp->msix_table[i].vector);
+			synchronize_irq(bp->msix_table[i + offset].vector);
 	} else
 		synchronize_irq(bp->pdev->irq);
@@ -748,7 +783,6 @@ static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
 	/* Tell compiler that consumer and producer can change */
 	barrier();
 	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
-
 }
 
 /* free skb in the packet ring at pos idx
@@ -842,9 +876,10 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
 	return (s16)(fp->bp->tx_ring_size) - used;
 }
 
-static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
+static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
 {
 	struct bnx2x *bp = fp->bp;
+	struct netdev_queue *txq;
 	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
 	int done = 0;
@@ -853,6 +888,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
 		return;
 #endif
 
+	txq = netdev_get_tx_queue(bp->dev, fp->index);
 	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
 	sw_cons = fp->tx_pkt_cons;
@@ -874,32 +910,30 @@
 		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
 		sw_cons++;
 		done++;
-
-		if (done == work)
-			break;
 	}
 
 	fp->tx_pkt_cons = sw_cons;
 	fp->tx_bd_cons = bd_cons;
 
-	/* Need to make the tx_cons update visible to start_xmit()
-	 * before checking for netif_queue_stopped().  Without the
-	 * memory barrier, there is a small possibility that start_xmit()
-	 * will miss it and cause the queue to be stopped forever.
-	 */
-	smp_mb();
-
 	/* TBD need a thresh? */
-	if (unlikely(netif_queue_stopped(bp->dev))) {
+	if (unlikely(netif_tx_queue_stopped(txq))) {
 
-		netif_tx_lock(bp->dev);
+		__netif_tx_lock(txq, smp_processor_id());
 
-		if (netif_queue_stopped(bp->dev) &&
+		/* Need to make the tx_bd_cons update visible to start_xmit()
+		 * before checking for netif_tx_queue_stopped().  Without the
+		 * memory barrier, there is a small possibility that
+		 * start_xmit() will miss it and cause the queue to be stopped
+		 * forever.
+		 */
+		smp_mb();
+
+		if ((netif_tx_queue_stopped(txq)) &&
 		    (bp->state == BNX2X_STATE_OPEN) &&
 		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
-			netif_wake_queue(bp->dev);
+			netif_tx_wake_queue(txq);
 
-		netif_tx_unlock(bp->dev);
+		__netif_tx_unlock(txq);
 	}
 }
@@ -913,12 +947,12 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 	DP(BNX2X_MSG_SP,
 	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
-	   FP_IDX(fp), cid, command, bp->state,
+	   fp->index, cid, command, bp->state,
 	   rr_cqe->ramrod_cqe.ramrod_type);
 
 	bp->spq_left++;
 
-	if (FP_IDX(fp)) {
+	if (fp->index) {
 		switch (command | fp->state) {
 		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
 						BNX2X_FP_STATE_OPENING):
@@ -1078,8 +1112,7 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	pci_dma_sync_single_for_device(bp->pdev,
 				       pci_unmap_addr(cons_rx_buf, mapping),
-				       bp->rx_offset + RX_COPY_THRESH,
-				       PCI_DMA_FROMDEVICE);
+				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
 	pci_unmap_addr_set(prod_rx_buf, mapping,
@@ -1260,7 +1293,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		   where we are and drop the whole packet */
 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 		if (unlikely(err)) {
-			bp->eth_stats.rx_skb_alloc_failed++;
+			fp->eth_q_stats.rx_skb_alloc_failed++;
 			return err;
 		}
@@ -1365,7 +1398,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		/* else drop the packet and keep the buffer in the bin */
 		DP(NETIF_MSG_RX_STATUS,
 		   "Failed to allocate new skb - dropping packet!\n");
-		bp->eth_stats.rx_skb_alloc_failed++;
+		fp->eth_q_stats.rx_skb_alloc_failed++;
 	}
 
 	fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1376,7 +1409,7 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 			      u16 bd_prod, u16 rx_comp_prod,
 			      u16 rx_sge_prod)
 {
-	struct tstorm_eth_rx_producers rx_prods = {0};
+	struct ustorm_eth_rx_producers rx_prods = {0};
 	int i;
 
 	/* Update producers */
@@ -1394,16 +1427,16 @@
 	 */
 	wmb();
 
-	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
-		REG_WR(bp, BAR_TSTRORM_INTMEM +
-		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
+	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
 		       ((u32 *)&rx_prods)[i]);
 
 	mmiowb(); /* keep prod updates ordered */
 
 	DP(NETIF_MSG_RX_STATUS,
-	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
-	   bd_prod, rx_comp_prod, rx_sge_prod);
+	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
+	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
 }
@@ -1437,7 +1470,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 	DP(NETIF_MSG_RX_STATUS,
 	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
-	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);
+	   fp->index, hw_comp_cons, sw_comp_cons);
 
 	while (sw_comp_cons != hw_comp_cons) {
 		struct sw_rx_bd *rx_buf = NULL;
@@ -1527,7 +1560,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  flags %x  rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				bp->eth_stats.rx_err_discard_pkt++;
+				fp->eth_q_stats.rx_err_discard_pkt++;
 				goto reuse_rx;
 			}
@@ -1544,7 +1577,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR  packet dropped "
 					   "because of alloc failure\n");
-					bp->eth_stats.rx_skb_alloc_failed++;
+					fp->eth_q_stats.rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
@@ -1570,7 +1603,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR,
 				   "ERROR  packet dropped because "
 				   "of alloc failure\n");
-				bp->eth_stats.rx_skb_alloc_failed++;
+				fp->eth_q_stats.rx_skb_alloc_failed++;
 reuse_rx:
 				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
 				goto next_rx;
@@ -1583,10 +1616,11 @@ reuse_rx:
 				if (likely(BNX2X_RX_CSUM_OK(cqe)))
 					skb->ip_summed = CHECKSUM_UNNECESSARY;
 				else
-					bp->eth_stats.hw_csum_err++;
+					fp->eth_q_stats.hw_csum_err++;
 			}
 		}
 
+		skb_record_rx_queue(skb, fp->index);
 #ifdef BCM_VLAN
 		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
@@ -1632,7 +1666,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 {
 	struct bnx2x_fastpath *fp = fp_cookie;
 	struct bnx2x *bp = fp->bp;
-	int index = FP_IDX(fp);
+	int index = fp->index;
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
@@ -1641,8 +1675,8 @@
 	}
 
 	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
-	   index, FP_SB_ID(fp));
-	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
+	   index, fp->sb_id);
+	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -1654,15 +1688,14 @@
 	prefetch(&fp->status_blk->c_status_block.status_block_index);
 	prefetch(&fp->status_blk->u_status_block.status_block_index);
 
-	netif_rx_schedule(&bnx2x_fp(bp, index, napi));
+	napi_schedule(&bnx2x_fp(bp, index, napi));
 
 	return IRQ_HANDLED;
 }
 
 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 {
-	struct net_device *dev = dev_instance;
-	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x *bp = netdev_priv(dev_instance);
 	u16 status = bnx2x_ack_int(bp);
 	u16 mask;
@@ -1671,7 +1704,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 		DP(NETIF_MSG_INTR, "not our interrupt!\n");
 		return IRQ_NONE;
 	}
-	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
+	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
 
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
@@ -1693,7 +1726,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 		prefetch(&fp->status_blk->c_status_block.status_block_index);
 		prefetch(&fp->status_blk->u_status_block.status_block_index);
 
-		netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
+		napi_schedule(&bnx2x_fp(bp, 0, napi));
 
 		status &= ~mask;
 	}
@@ -1806,26 +1839,50 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 /* HW Lock for shared dual port PHYs */
 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 {
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
 	mutex_lock(&bp->port.phy_mutex);
 
-	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
-	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+	if (bp->port.need_hw_lock)
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 }
 
 static void bnx2x_release_phy_lock(struct bnx2x *bp)
 {
-	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
-
-	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
-	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
+	if (bp->port.need_hw_lock)
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+	int value;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	/* read GPIO value */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+	/* get the requested pin value */
+	if ((gpio_reg & gpio_mask) == gpio_mask)
+		value = 1;
+	else
+		value = 0;
+
+	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
+
+	return value;
+}
+
 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 {
 	/* The GPIO should be swapped if swap register is set and active */
@@ -1879,6 +1936,52 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
 	return 0;
 }
 
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO int */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+				   "output low\n", gpio_num, gpio_shift);
+		/* clear SET and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+				   "output high\n", gpio_num, gpio_shift);
+		/* clear CLR and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
+
 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
 {
 	u32 spio_mask = (1 << spio_num);
@@ -1933,13 +2036,16 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
 		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
 					  ADVERTISED_Pause);
 		break;
+
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
 		bp->port.advertising |= (ADVERTISED_Asym_Pause |
 					 ADVERTISED_Pause);
 		break;
+
 	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
 		bp->port.advertising |= ADVERTISED_Asym_Pause;
 		break;
+
 	default:
 		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
 					  ADVERTISED_Pause);
@@ -1964,7 +2070,8 @@ static void bnx2x_link_report(struct bnx2x *bp)
 	if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
 			printk(", receive ");
-			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			if (bp->link_vars.flow_ctrl &
+			    BNX2X_FLOW_CTRL_TX)
 				printk("& transmit ");
 		} else {
 			printk(", transmit ");
@@ -1979,7 +2086,7 @@
 	}
 }
 
-static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
+static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 {
 	if (!BP_NOMCP(bp)) {
 		u8 rc;
@@ -1995,18 +2102,24 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
 			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 
 		bnx2x_acquire_phy_lock(bp);
+
+		if (load_mode == LOAD_DIAG)
+			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
+
 		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
 		bnx2x_release_phy_lock(bp);
 
 		bnx2x_calc_fc_adv(bp);
 
-		if (bp->link_vars.link_up)
+		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 			bnx2x_link_report(bp);
-
+		}
 
 		return rc;
 	}
-	BNX2X_ERR("Bootcode is missing -not initializing link\n");
+	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 	return -EINVAL;
 }
@@ -2019,17 +2132,17 @@
 
 		bnx2x_calc_fc_adv(bp);
 	} else
-		BNX2X_ERR("Bootcode is missing -not setting link\n");
+		BNX2X_ERR("Bootcode is missing - can not set link\n");
 }
 
 static void bnx2x__link_reset(struct bnx2x *bp)
 {
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
-		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
 		bnx2x_release_phy_lock(bp);
 	} else
-		BNX2X_ERR("Bootcode is missing -not resetting link\n");
+		BNX2X_ERR("Bootcode is missing - can not reset link\n");
 }
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
@@ -2043,119 +2156,42 @@
 	return rc;
 }
 
-/* Calculates the sum of vn_min_rates.
-   It's needed for further normalizing of the min_rates.
-
-   Returns:
-     sum of vn_min_rates
-       or
-     0 - if all the min_rates are 0.
-     In the later case fairness algorithm should be deactivated.
-     If not all min_rates are zero then those that are zeroes will
-     be set to 1.
- */
-static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
 {
-	int i, port = BP_PORT(bp);
-	u32 wsum = 0;
-	int all_zero = 1;
+	u32 r_param = bp->link_vars.line_speed / 8;
+	u32 fair_periodic_timeout_usec;
+	u32 t_fair;
 
-	for (i = 0; i < E1HVN_MAX; i++) {
-		u32 vn_cfg =
-			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
-		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
-				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
-			/* If min rate is zero - set it to 1 */
-			if (!vn_min_rate)
-				vn_min_rate = DEF_MIN_RATE;
-			else
-				all_zero = 0;
-
-			wsum += vn_min_rate;
-		}
-	}
-
-	/* ... only if all min rates are zeros - disable FAIRNESS */
-	if (all_zero)
-		return 0;
-
-	return wsum;
-}
-
-static void bnx2x_init_port_minmax(struct bnx2x *bp,
-				   int en_fness,
-				   u16 port_rate,
-				   struct cmng_struct_per_port *m_cmng_port)
-{
-	u32 r_param = port_rate / 8;
-	int port = BP_PORT(bp);
-	int i;
-
-	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
-
-	/* Enable minmax only if we are in e1hmf mode */
-	if (IS_E1HMF(bp)) {
-		u32 fair_periodic_timeout_usec;
-		u32 t_fair;
-
-		/* Enable rate shaping and fairness */
-		m_cmng_port->flags.cmng_vn_enable = 1;
-		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
-		m_cmng_port->flags.rate_shaping_enable = 1;
-
-		if (!en_fness)
-			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
-			   "  fairness will be disabled\n");
+	memset(&(bp->cmng.rs_vars), 0,
+	       sizeof(struct rate_shaping_vars_per_port));
+	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
 
-		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
-		m_cmng_port->rs_vars.rs_periodic_timeout =
-						RS_PERIODIC_TIMEOUT_USEC / 4;
+	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
 
-		/* this is the threshold below which no timer arming will occur
-		   1.25 coefficient is for the threshold to be a little bigger
-		   than the real time, to compensate for timer in-accuracy */
-		m_cmng_port->rs_vars.rs_threshold =
+	/* this is the threshold below which no timer arming will occur
+	   1.25 coefficient is for the threshold to be a little bigger
+	   than the real time, to compensate for timer in-accuracy */
+	bp->cmng.rs_vars.rs_threshold =
 				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
 
-		/* resolution of fairness timer */
-		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
-		/* for 10G it is 1000usec. for 1G it is 10000usec. */
-		t_fair = T_FAIR_COEF / port_rate;
+	/* resolution of fairness timer */
+	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+	/* for 10G it is 1000usec. for 1G it is 10000usec. */
+	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
 
-		/* this is the threshold below which we won't arm
-		   the timer anymore */
-		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
+	/* this is the threshold below which we won't arm the timer anymore */
+	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
 
-		/* we multiply by 1e3/8 to get bytes/msec.
-		   We don't want the credits to pass a credit
-		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
-		m_cmng_port->fair_vars.upper_bound =
-						r_param * t_fair * FAIR_MEM;
-		/* since each tick is 4 usec */
-		m_cmng_port->fair_vars.fairness_timeout =
-						fair_periodic_timeout_usec / 4;
-
-	} else {
-		/* Disable rate shaping and fairness */
-		m_cmng_port->flags.cmng_vn_enable = 0;
-		m_cmng_port->flags.fairness_enable = 0;
-		m_cmng_port->flags.rate_shaping_enable = 0;
-
-		DP(NETIF_MSG_IFUP,
-		   "Single function mode  minmax will be disabled\n");
-	}
-
-	/* Store it to internal memory */
-	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
-		REG_WR(bp, BAR_XSTRORM_INTMEM +
-		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
-		       ((u32 *)(m_cmng_port))[i]);
+	/* we multiply by 1e3/8 to get bytes/msec.
+	   We don't want the credits to pass a credit
+	   of the t_fair*FAIR_MEM (algorithm resolution) */
+	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+	/* since each tick is 4 usec */
+	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
 }
 
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
-				 u32 wsum, u16 port_rate,
-				 struct cmng_struct_per_port *m_cmng_port)
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 {
 	struct rate_shaping_vars_per_vn m_rs_vn;
 	struct fairness_vars_per_vn m_fair_vn;
@@ -2171,17 +2207,18 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 	} else {
 		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
 				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
-		/* If FAIRNESS is enabled (not all min rates are zeroes) and
+		/* If fairness is enabled (not all min rates are zeroes) and
 		   if current min rate is zero - set it to 1.
 		   This is a requirement of the algorithm. */
-		if ((vn_min_rate == 0) && wsum)
+		if (bp->vn_weight_sum && (vn_min_rate == 0))
 			vn_min_rate = DEF_MIN_RATE;
 		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
 				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
 	}
 
-	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
-	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
+	DP(NETIF_MSG_IFUP,
+	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
+	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
 
 	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
 	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
@@ -2193,55 +2230,20 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 	m_rs_vn.vn_counter.quota =
 				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
 
-#ifdef BNX2X_PER_PROT_QOS
-	/* per protocol counter */
-	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
-		/* maximal Mbps for this protocol */
-		m_rs_vn.protocol_counters[protocol].rate =
-						protocol_max_rate[protocol];
-		/* the quota in each timer period -
-		   number of bytes transmitted in this period */
-		m_rs_vn.protocol_counters[protocol].quota =
-			(u32)(rs_periodic_timeout_usec *
-			  ((double)m_rs_vn.
-				   protocol_counters[protocol].rate/8));
-	}
-#endif
-
-	if (wsum) {
+	if (bp->vn_weight_sum) {
 		/* credit for each period of the fairness algorithm:
 		   number of bytes in T_FAIR (the vn share the port rate).
-		   wsum should not be larger than 10000, thus
-		   T_FAIR_COEF / (8 * wsum) will always be grater than zero */
+		   vn_weight_sum should not be larger than 10000, thus
+		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+		   than zero */
 		m_fair_vn.vn_credit_delta =
-			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
-			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
+			max((u32)(vn_min_rate * (T_FAIR_COEF /
+						 (8 * bp->vn_weight_sum))),
+			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
 		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
 		   m_fair_vn.vn_credit_delta);
 	}
 
-#ifdef BNX2X_PER_PROT_QOS
-	do {
-		u32 protocolWeightSum = 0;
-
-		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
-			protocolWeightSum +=
-					drvInit.protocol_min_rate[protocol];
-		/* per protocol counter -
-		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
-		if (protocolWeightSum > 0) {
-			for (protocol = 0;
-			     protocol < NUM_OF_PROTOCOLS; protocol++)
-				/* credit for each period of the
-				   fairness algorithm - number of bytes in
-				   T_FAIR (the protocol share the vn rate) */
-				m_fair_vn.protocol_credit_delta[protocol] =
-					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
-		}
-	} while (0);
-#endif
-
 	/* Store it to internal memory */
 	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
 		REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -2254,11 +2256,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
 		       ((u32 *)(&m_fair_vn))[i]);
 }
 
+/* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
-	int vn;
-
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -2266,6 +2267,19 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 
 	if (bp->link_vars.link_up) {
 
+		/* dropless flow control */
+		if (CHIP_IS_E1H(bp)) {
+			int port = BP_PORT(bp);
+			u32 pause_enabled = 0;
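A note on the dump-loop rewrite in bnx2x_panic_dump() above: the loops move from "j < end; j++" to "j != end; j = RX_BD(j + 1)" (likewise RX_SGE, RCQ_BD and TX_BD) because start and end are masked ring indices, so a dump window that crosses the end of the ring leaves start numerically larger than end, and a plain "<" test would skip the dump instead of walking across the wrap point. A minimal standalone sketch of that pattern, assuming an illustrative 16-entry power-of-two ring (NUM_BD and BD() are demo names, not the driver's):

	#include <stdio.h>

	/* Illustrative ring size; must be a power of two so that
	 * masking with (NUM_BD - 1) implements the wraparound. */
	#define NUM_BD	16
	#define BD(x)	((x) & (NUM_BD - 1))

	int main(void)
	{
		/* a dump window that crosses the wrap point: 14 .. 2 */
		unsigned int start = BD(14);
		unsigned int end = BD(14 + 5);	/* == 3 after masking */
		unsigned int j;

		/* "for (j = start; j < end; j++)" would not run at all here
		 * (14 < 3 is false); the masked increment visits 14, 15, 0, 1, 2 */
		for (j = start; j != end; j = BD(j + 1))
			printf("dump bd %u\n", j);

		return 0;
	}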
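Similarly, the bnx2x_tx_int() hunk moves the smp_mb() to after __netif_tx_lock(), per the comment in the new code: the consumer-index update must be globally visible before the queue-stopped state is re-read, otherwise start_xmit() on another CPU can stop the queue just after this side's check and the wakeup is lost forever. A compilable C11 sketch of the same publish-then-recheck idiom (stdatomic stands in for the kernel's barrier and flag primitives; all names here are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint tx_cons;		/* consumer index (cf. fp->tx_bd_cons) */
	static atomic_bool queue_stopped;	/* cf. netif_tx_queue_stopped() */

	/* Reclaim path: publish the new consumer index, then re-check the
	 * stopped flag only after a full barrier (cf. smp_mb() in the hunk). */
	static void tx_reclaim(unsigned int new_cons)
	{
		atomic_store_explicit(&tx_cons, new_cons, memory_order_relaxed);

		/* Without this fence the stopped-flag read below could be
		 * ordered before the store above, and a producer stopping the
		 * queue concurrently would never be woken. */
		atomic_thread_fence(memory_order_seq_cst);

		if (atomic_load_explicit(&queue_stopped, memory_order_relaxed)) {
			atomic_store_explicit(&queue_stopped, 0,
					      memory_order_relaxed);
			printf("wake queue\n");	/* cf. netif_tx_wake_queue() */
		}
	}

	int main(void)
	{
		atomic_store(&queue_stopped, 1);
		tx_reclaim(42);
		return 0;
	}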