Diffstat (limited to 'drivers/net/bnx2.c')
-rw-r--r-- | drivers/net/bnx2.c | 1333
1 file changed, 971 insertions, 362 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4e7b46e4487..34aebc6e758 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
  *
- * Copyright (c) 2004-2007 Broadcom Corporation
+ * Copyright (c) 2004-2008 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -52,12 +52,12 @@
 #include "bnx2_fw.h"
 #include "bnx2_fw2.h"
 
-#define FW_BUF_SIZE	0x8000
+#define FW_BUF_SIZE	0x10000
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.6.9"
-#define DRV_MODULE_RELDATE	"December 8, 2007"
+#define DRV_MODULE_VERSION	"1.7.2"
+#define DRV_MODULE_RELDATE	"January 21, 2008"
 
 #define RUN_AT(x) (jiffies + (x))
@@ -226,7 +226,7 @@ static struct flash_spec flash_5709 = {
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
-static inline u32 bnx2_tx_avail(struct bnx2 *bp)
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	u32 diff;
@@ -235,7 +235,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 	/* The ring uses 256 indices for 255 entries, one of them
 	 * needs to be skipped.
 	 */
-	diff = bp->tx_prod - bp->tx_cons;
+	diff = bp->tx_prod - bnapi->tx_cons;
 	if (unlikely(diff >= TX_DESC_CNT)) {
 		diff &= 0xffff;
 		if (diff == TX_DESC_CNT)
@@ -296,7 +296,7 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 	u32 val1;
 	int i, ret;
 
-	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
@@ -334,7 +334,7 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 		ret = 0;
 	}
 
-	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
@@ -353,7 +353,7 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 	u32 val1;
 	int i, ret;
 
-	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
@@ -383,7 +383,7 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 	else
 		ret = 0;
 
-	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
@@ -399,30 +399,65 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 static void
 bnx2_disable_int(struct bnx2 *bp)
 {
-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
-	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	int i;
+	struct bnx2_napi *bnapi;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		bnapi = &bp->bnx2_napi[i];
+		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	}
 	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 }
 
 static void
 bnx2_enable_int(struct bnx2 *bp)
 {
-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
-	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
-	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
+	int i;
+	struct bnx2_napi *bnapi;
 
-	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
-	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		bnapi = &bp->bnx2_napi[i];
+
+		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+		       bnapi->last_status_idx);
+
+		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+		       bnapi->last_status_idx);
+	}
 	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 }
 
 static void
 bnx2_disable_int_sync(struct bnx2 *bp)
 {
+	int i;
+
 	atomic_inc(&bp->intr_sem);
 	bnx2_disable_int(bp);
-	synchronize_irq(bp->pdev->irq);
+	for (i = 0; i < bp->irq_nvecs; i++)
+		synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void
+bnx2_napi_disable(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		napi_disable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_napi_enable(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		napi_enable(&bp->bnx2_napi[i].napi);
 }
 
 static void
@@ -430,7 +465,7 @@ bnx2_netif_stop(struct bnx2 *bp)
 {
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
-		napi_disable(&bp->napi);
+		bnx2_napi_disable(bp);
 		netif_tx_disable(bp->dev);
 		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
 	}
@@ -442,7 +477,7 @@ bnx2_netif_start(struct bnx2 *bp)
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
 			netif_wake_queue(bp->dev);
-			napi_enable(&bp->napi);
+			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
 		}
 	}
@@ -468,8 +503,7 @@ bnx2_free_mem(struct bnx2 *bp)
 		bp->stats_blk = NULL;
 	}
 	if (bp->tx_desc_ring) {
-		pci_free_consistent(bp->pdev,
-				    sizeof(struct tx_bd) * TX_DESC_CNT,
+		pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
 				    bp->tx_desc_ring, bp->tx_desc_mapping);
 		bp->tx_desc_ring = NULL;
 	}
@@ -477,14 +511,23 @@ bnx2_free_mem(struct bnx2 *bp)
 	bp->tx_buf_ring = NULL;
 	for (i = 0; i < bp->rx_max_ring; i++) {
 		if (bp->rx_desc_ring[i])
-			pci_free_consistent(bp->pdev,
-					    sizeof(struct rx_bd) * RX_DESC_CNT,
+			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
 					    bp->rx_desc_ring[i],
 					    bp->rx_desc_mapping[i]);
 		bp->rx_desc_ring[i] = NULL;
 	}
 	vfree(bp->rx_buf_ring);
 	bp->rx_buf_ring = NULL;
+	for (i = 0; i < bp->rx_max_pg_ring; i++) {
+		if (bp->rx_pg_desc_ring[i])
+			pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
+					    bp->rx_pg_desc_ring[i],
+					    bp->rx_pg_desc_mapping[i]);
+		bp->rx_pg_desc_ring[i] = NULL;
+	}
+	if (bp->rx_pg_ring)
+		vfree(bp->rx_pg_ring);
+	bp->rx_pg_ring = NULL;
 }
 
 static int
@@ -492,38 +535,54 @@ bnx2_alloc_mem(struct bnx2 *bp)
 {
 	int i, status_blk_size;
 
-	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
-				  GFP_KERNEL);
+	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
 	if (bp->tx_buf_ring == NULL)
 		return -ENOMEM;
 
-	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
-						sizeof(struct tx_bd) *
-						TX_DESC_CNT,
+	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
 						&bp->tx_desc_mapping);
 	if (bp->tx_desc_ring == NULL)
 		goto alloc_mem_err;
 
-	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
-				  bp->rx_max_ring);
+	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
 	if (bp->rx_buf_ring == NULL)
 		goto alloc_mem_err;
 
-	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
-	       bp->rx_max_ring);
+	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
 
 	for (i = 0; i < bp->rx_max_ring; i++) {
 		bp->rx_desc_ring[i] =
-			pci_alloc_consistent(bp->pdev,
-					     sizeof(struct rx_bd) * RX_DESC_CNT,
+			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
 		if (bp->rx_desc_ring[i] == NULL)
 			goto alloc_mem_err;
 
 	}
 
+	if (bp->rx_pg_ring_size) {
+		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+					 bp->rx_max_pg_ring);
+		if (bp->rx_pg_ring == NULL)
+			goto alloc_mem_err;
+
+		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
+		       bp->rx_max_pg_ring);
+	}
+
+	for (i = 0; i < bp->rx_max_pg_ring; i++) {
+		bp->rx_pg_desc_ring[i] =
+			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
+					     &bp->rx_pg_desc_mapping[i]);
+		if (bp->rx_pg_desc_ring[i] == NULL)
+			goto alloc_mem_err;
+
+	}
+
 	/* Combine status and statistics blocks into one allocation. */
 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+	if (bp->flags & BNX2_FLAG_MSIX_CAP)
+		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+						 BNX2_SBLK_MSIX_ALIGN_SIZE);
 	bp->status_stats_size = status_blk_size +
 				sizeof(struct statistics_block);
@@ -534,6 +593,18 @@ bnx2_alloc_mem(struct bnx2 *bp)
 
 	memset(bp->status_blk, 0, bp->status_stats_size);
 
+	bp->bnx2_napi[0].status_blk = bp->status_blk;
+	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
+		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
+			struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+
+			bnapi->status_blk_msix = (void *)
+				((unsigned long) bp->status_blk +
+				 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			bnapi->int_num = i << 24;
+		}
+	}
+
 	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
 				  status_blk_size);
@@ -563,7 +634,7 @@ bnx2_report_fw_link(struct bnx2 *bp)
 {
 	u32 fw_link_status = 0;
 
-	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 		return;
 
 	if (bp->link_up) {
@@ -605,7 +676,7 @@ bnx2_report_fw_link(struct bnx2 *bp)
 			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 
 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
-			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
+			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
 			else
 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
@@ -621,7 +692,7 @@ static char *
 bnx2_xceiver_str(struct bnx2 *bp)
 {
 	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
-		((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
+		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
 		 "Copper"));
 }
@@ -681,7 +752,7 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp)
 		return;
 	}
 
-	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
 	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
 		u32 val;
@@ -696,7 +767,7 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp)
 	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
 	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
 
-	if (bp->phy_flags & PHY_SERDES_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 		u32 new_local_adv = 0;
 		u32 new_remote_adv = 0;
@@ -979,7 +1050,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
 static void
 bnx2_enable_bmsr1(struct bnx2 *bp)
 {
-	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
 	    (CHIP_NUM(bp) == CHIP_NUM_5709))
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_GP_STATUS);
@@ -988,7 +1059,7 @@ static void
 bnx2_disable_bmsr1(struct bnx2 *bp)
 {
-	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
 	    (CHIP_NUM(bp) == CHIP_NUM_5709))
 		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
 			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
@@ -1000,7 +1071,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp)
 	u32 up1;
 	int ret = 1;
 
-	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return 0;
 
 	if (bp->autoneg & AUTONEG_SPEED)
@@ -1029,7 +1100,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
 	u32 up1;
 	int ret = 0;
 
-	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return 0;
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709)
@@ -1054,7 +1125,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
 {
 	u32 bmcr;
 
-	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return;
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
@@ -1089,7 +1160,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
 {
 	u32 bmcr;
 
-	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
 		return;
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
@@ -1115,6 +1186,19 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
 	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
 }
 
+static void
+bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
+{
+	u32 val;
+
+	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+	if (start)
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
+	else
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
+}
+
 static int
 bnx2_set_link(struct bnx2 *bp)
 {
@@ -1126,7 +1210,7 @@ bnx2_set_link(struct bnx2 *bp)
 		return 0;
 	}
 
-	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 		return 0;
 
 	link_up = bp->link_up;
@@ -1136,10 +1220,14 @@ bnx2_set_link(struct bnx2 *bp)
 	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
 	bnx2_disable_bmsr1(bp);
 
-	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
 	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
 		u32 val;
 
+		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
+			bnx2_5706s_force_link_dn(bp, 0);
+			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
+		}
 		val = REG_RD(bp, BNX2_EMAC_STATUS);
 		if (val & BNX2_EMAC_STATUS_LINK)
 			bmsr |= BMSR_LSTATUS;
@@ -1150,7 +1238,7 @@ bnx2_set_link(struct bnx2 *bp)
 	if (bmsr & BMSR_LSTATUS) {
 		bp->link_up = 1;
 
-		if (bp->phy_flags & PHY_SERDES_FLAG) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 			if (CHIP_NUM(bp) == CHIP_NUM_5706)
 				bnx2_5706s_linkup(bp);
 			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
@@ -1164,11 +1252,19 @@ bnx2_set_link(struct bnx2 *bp)
 		bnx2_resolve_flow_ctrl(bp);
 	}
 	else {
-		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
+		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
 		    (bp->autoneg & AUTONEG_SPEED))
 			bnx2_disable_forced_2g5(bp);
 
-		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
+			u32 bmcr;
+
+			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+		}
 		bp->link_up = 0;
 	}
@@ -1213,7 +1309,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
 
-		if (bp->phy_flags & PHY_SERDES_FLAG) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 			adv = ADVERTISE_1000XPAUSE;
 		}
 		else {
@@ -1221,7 +1317,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
 		}
 	}
 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
-		if (bp->phy_flags & PHY_SERDES_FLAG) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 			adv = ADVERTISE_1000XPSE_ASYM;
 		}
 		else {
@@ -1229,7 +1325,7 @@ bnx2_phy_get_pause_adv(struct bnx2 *bp)
 		}
 	}
 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
-		if (bp->phy_flags & PHY_SERDES_FLAG) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
 		}
 		else {
@@ -1304,7 +1400,7 @@ bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
 	u32 adv, bmcr;
 	u32 new_adv = 0;
 
-	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 		return (bnx2_setup_remote_phy(bp, port));
 
 	if (!(bp->autoneg & AUTONEG_SPEED)) {
@@ -1414,7 +1510,7 @@ bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
 }
 
 #define ETHTOOL_ALL_FIBRE_SPEED						\
-	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
+	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)
@@ -1478,12 +1574,12 @@ bnx2_set_default_remote_link(struct bnx2 *bp)
 static void
 bnx2_set_default_link(struct bnx2 *bp)
 {
-	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 		return bnx2_set_default_remote_link(bp);
 
 	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
 	bp->req_line_speed = 0;
-	if (bp->phy_flags & PHY_SERDES_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 		u32 reg;
 
 		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
@@ -1713,7 +1809,7 @@ bnx2_setup_phy(struct bnx2 *bp, u8 port)
 	if (bp->loopback == MAC_LOOPBACK)
 		return 0;
 
-	if (bp->phy_flags & PHY_SERDES_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 		return (bnx2_setup_serdes_phy(bp, port));
 	}
 	else {
@@ -1748,7 +1844,7 @@ bnx2_init_5709s_phy(struct bnx2 *bp)
 	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
 	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
-	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
+	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
 		val |= BCM5708S_UP1_2G5;
 	else
 		val &= ~BCM5708S_UP1_2G5;
@@ -1791,7 +1887,7 @@ bnx2_init_5708s_phy(struct bnx2 *bp)
 		val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
 
-	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
 		val |= BCM5708S_UP1_2G5;
 		bnx2_write_phy(bp, BCM5708S_UP1, val);
@@ -1833,7 +1929,7 @@ bnx2_init_5706s_phy(struct bnx2 *bp)
 {
 	bnx2_reset_phy(bp);
 
-	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
+	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
 
 	if (CHIP_NUM(bp) == CHIP_NUM_5706)
 		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
@@ -1872,7 +1968,7 @@ bnx2_init_copper_phy(struct bnx2 *bp)
 
 	bnx2_reset_phy(bp);
 
-	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
 		bnx2_write_phy(bp, 0x18, 0x0c00);
 		bnx2_write_phy(bp, 0x17, 0x000a);
 		bnx2_write_phy(bp, 0x15, 0x310b);
@@ -1883,7 +1979,7 @@ bnx2_init_copper_phy(struct bnx2 *bp)
 		bnx2_write_phy(bp, 0x18, 0x0400);
 	}
 
-	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
 		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
 			       MII_BNX2_DSP_EXPAND_REG | 0x8);
 		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
@@ -1923,8 +2019,8 @@ bnx2_init_phy(struct bnx2 *bp)
 	u32 val;
 	int rc = 0;
 
-	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
-	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
+	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
+	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
 
 	bp->mii_bmcr = MII_BMCR;
 	bp->mii_bmsr = MII_BMSR;
@@ -1934,7 +2030,7 @@ bnx2_init_phy(struct bnx2 *bp)
 
 	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
 
-	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 		goto setup_phy;
 
 	bnx2_read_phy(bp, MII_PHYSID1, &val);
@@ -1942,7 +2038,7 @@ bnx2_init_phy(struct bnx2 *bp)
 	bnx2_read_phy(bp, MII_PHYSID2, &val);
 	bp->phy_id |= val & 0xffff;
 
-	if (bp->phy_flags & PHY_SERDES_FLAG) {
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
 			rc = bnx2_init_5706s_phy(bp);
 		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
@@ -2125,15 +2221,12 @@ bnx2_init_context(struct bnx2 *bp)
 			vcid_addr += (i << PHY_CTX_SHIFT);
 			pcid_addr += (i << PHY_CTX_SHIFT);
 
-			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
+			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
 			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
 
 			/* Zero out the context. */
 			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
-				CTX_WR(bp, 0x00, offset, 0);
-
-			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
-			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+				CTX_WR(bp, vcid_addr, offset, 0);
 		}
 	}
 }
@@ -2206,7 +2299,43 @@ bnx2_set_mac_addr(struct bnx2 *bp)
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
+{
+	dma_addr_t mapping;
+	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+	struct rx_bd *rxbd =
+		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+	struct page *page = alloc_page(GFP_ATOMIC);
+
+	if (!page)
+		return -ENOMEM;
+	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+	rx_pg->page = page;
+	pci_unmap_addr_set(rx_pg, mapping, mapping);
+	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+	return 0;
+}
+
+static void
+bnx2_free_rx_page(struct bnx2 *bp, u16 index)
+{
+	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
+	struct page *page = rx_pg->page;
+
+	if (!page)
+		return;
+
+	pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
+		       PCI_DMA_FROMDEVICE);
+
+	__free_page(page);
+	rx_pg->page = NULL;
+}
+
+static inline int
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
 {
 	struct sk_buff *skb;
 	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
@@ -2231,15 +2360,15 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
 	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
 
-	bp->rx_prod_bseq += bp->rx_buf_use_size;
+	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
 
 	return 0;
 }
 
 static int
-bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
+bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
 {
-	struct status_block *sblk = bp->status_blk;
+	struct status_block *sblk = bnapi->status_blk;
 	u32 new_link_state, old_link_state;
 	int is_set = 1;
@@ -2257,30 +2386,41 @@ bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
 }
 
 static void
-bnx2_phy_int(struct bnx2 *bp)
+bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
-	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
 		spin_lock(&bp->phy_lock);
 		bnx2_set_link(bp);
 		spin_unlock(&bp->phy_lock);
 	}
-	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
 		bnx2_set_remote_link(bp);
 }
 
-static void
-bnx2_tx_int(struct bnx2 *bp)
+static inline u16
+bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
+{
+	u16 cons;
+
+	if (bnapi->int_num == 0)
+		cons = bnapi->status_blk->status_tx_quick_consumer_index0;
+	else
+		cons = bnapi->status_blk_msix->status_tx_quick_consumer_index;
+
+	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+		cons++;
+	return cons;
+}
+
+static int
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
-	struct status_block *sblk = bp->status_blk;
 	u16 hw_cons, sw_cons, sw_ring_cons;
-	int tx_free_bd = 0;
+	int tx_pkt = 0;
 
-	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
-	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
-		hw_cons++;
-	}
-	sw_cons = bp->tx_cons;
+	hw_cons = bnx2_get_hw_tx_cons(bnapi);
+	sw_cons = bnapi->tx_cons;
 
 	while (sw_cons != hw_cons) {
 		struct sw_bd *tx_buf;
@@ -2327,19 +2467,16 @@ bnx2_tx_int(struct bnx2 *bp)
 
 		sw_cons = NEXT_TX_BD(sw_cons);
 
-		tx_free_bd += last + 1;
-
 		dev_kfree_skb(skb);
+		tx_pkt++;
+		if (tx_pkt == budget)
+			break;
 
-		hw_cons = bp->hw_tx_cons =
-			sblk->status_tx_quick_consumer_index0;
-
-		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
-			hw_cons++;
-		}
+		hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}
 
-	bp->tx_cons = sw_cons;
+	bnapi->hw_tx_cons = hw_cons;
+	bnapi->tx_cons = sw_cons;
 	/* Need to make the tx_cons update visible to bnx2_start_xmit()
 	 * before checking for netif_queue_stopped().  Without the
 	 * memory barrier, there is a small possibility that bnx2_start_xmit()
@@ -2348,17 +2485,68 @@
 	smp_mb();
 
 	if (unlikely(netif_queue_stopped(bp->dev)) &&
-	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+	    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
 		netif_tx_lock(bp->dev);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
+		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
 			netif_wake_queue(bp->dev);
 		netif_tx_unlock(bp->dev);
 	}
+
+	return tx_pkt;
+}
+
+static void
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
+			struct sk_buff *skb, int count)
+{
+	struct sw_pg *cons_rx_pg, *prod_rx_pg;
+	struct rx_bd *cons_bd, *prod_bd;
+	dma_addr_t mapping;
+	int i;
+	u16 hw_prod = bnapi->rx_pg_prod, prod;
+	u16 cons = bnapi->rx_pg_cons;
+
+	for (i = 0; i < count; i++) {
+		prod = RX_PG_RING_IDX(hw_prod);
+
+		prod_rx_pg = &bp->rx_pg_ring[prod];
+		cons_rx_pg = &bp->rx_pg_ring[cons];
+		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+		if (i == 0 && skb) {
+			struct page *page;
+			struct skb_shared_info *shinfo;
+
+			shinfo = skb_shinfo(skb);
+			shinfo->nr_frags--;
+			page = shinfo->frags[shinfo->nr_frags].page;
+			shinfo->frags[shinfo->nr_frags].page = NULL;
+			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+					       PCI_DMA_FROMDEVICE);
+			cons_rx_pg->page = page;
+			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
+			dev_kfree_skb(skb);
+		}
+		if (prod != cons) {
+			prod_rx_pg->page = cons_rx_pg->page;
+			cons_rx_pg->page = NULL;
+			pci_unmap_addr_set(prod_rx_pg, mapping,
+				pci_unmap_addr(cons_rx_pg, mapping));
+
+			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+
+		}
+		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
+		hw_prod = NEXT_RX_BD(hw_prod);
+	}
+	bnapi->rx_pg_prod = hw_prod;
+	bnapi->rx_pg_cons = cons;
 }
 
 static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 	u16 cons, u16 prod)
 {
 	struct sw_bd *cons_rx_buf, *prod_rx_buf;
@@ -2371,7 +2559,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 		pci_unmap_addr(cons_rx_buf, mapping),
 		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
-	bp->rx_prod_bseq += bp->rx_buf_use_size;
+	bnapi->rx_prod_bseq += bp->rx_buf_use_size;
 
 	prod_rx_buf->skb = skb;
@@ -2387,10 +2575,102 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
 	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
 }
 
+static int
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
+	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
+	    u32 ring_idx)
+{
+	int err;
+	u16 prod = ring_idx & 0xffff;
+
+	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
+	if (unlikely(err)) {
+		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
+		if (hdr_len) {
+			unsigned int raw_len = len + 4;
+			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
+
+			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
+		}
+		return err;
+	}
+
+	skb_reserve(skb, bp->rx_offset);
+	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+
+	if (hdr_len == 0) {
+		skb_put(skb, len);
+		return 0;
+	} else {
+		unsigned int i, frag_len, frag_size, pages;
+		struct sw_pg *rx_pg;
+		u16 pg_cons = bnapi->rx_pg_cons;
+		u16 pg_prod = bnapi->rx_pg_prod;
+
+		frag_size = len + 4 - hdr_len;
+		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
+		skb_put(skb, hdr_len);
+
+		for (i = 0; i < pages; i++) {
+			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
+			if (unlikely(frag_len <= 4)) {
+				unsigned int tail = 4 - frag_len;
+
+				bnapi->rx_pg_cons = pg_cons;
+				bnapi->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
+							pages - i);
+				skb->len -= tail;
+				if (i == 0) {
+					skb->tail -= tail;
+				} else {
+					skb_frag_t *frag =
+						&skb_shinfo(skb)->frags[i - 1];
+					frag->size -= tail;
+					skb->data_len -= tail;
+					skb->truesize -= tail;
+				}
+				return 0;
+			}
+			rx_pg = &bp->rx_pg_ring[pg_cons];
+
+			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			if (i == pages - 1)
+				frag_len -= 4;
+
+			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+			rx_pg->page = NULL;
+
+			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
+			if (unlikely(err)) {
+				bnapi->rx_pg_cons = pg_cons;
+				bnapi->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
+							pages - i);
+				return err;
+			}
+
+			frag_size -= frag_len;
+			skb->data_len += frag_len;
+			skb->truesize += frag_len;
+			skb->len += frag_len;
+
+			pg_prod = NEXT_RX_BD(pg_prod);
+			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+		}
+		bnapi->rx_pg_prod = pg_prod;
+		bnapi->rx_pg_cons = pg_cons;
+	}
+	return 0;
+}
+
 static inline u16
-bnx2_get_hw_rx_cons(struct bnx2 *bp)
+bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 {
-	u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
+	u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
 
 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
 		cons++;
@@ -2398,22 +2678,22 @@
 }
 
 static int
-bnx2_rx_int(struct bnx2 *bp, int budget)
+bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
 	struct l2_fhdr *rx_hdr;
-	int rx_pkt = 0;
+	int rx_pkt = 0, pg_ring_used = 0;
 
-	hw_cons = bnx2_get_hw_rx_cons(bp);
-	sw_cons = bp->rx_cons;
-	sw_prod = bp->rx_prod;
+	hw_cons = bnx2_get_hw_rx_cons(bnapi);
+	sw_cons = bnapi->rx_cons;
+	sw_prod = bnapi->rx_prod;
 
 	/* Memory barrier necessary as speculative reads of the rx
 	 * buffer can be ahead of the index in the status block
 	 */
 	rmb();
 	while (sw_cons != hw_cons) {
-		unsigned int len;
+		unsigned int len, hdr_len;
 		u32 status;
 		struct sw_bd *rx_buf;
 		struct sk_buff *skb;
@@ -2433,7 +2713,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
 		rx_hdr = (struct l2_fhdr *) skb->data;
-		len = rx_hdr->l2_fhdr_pkt_len - 4;
+		len = rx_hdr->l2_fhdr_pkt_len;
 
 		if ((status = rx_hdr->l2_fhdr_status) &
 			(L2_FHDR_ERRORS_BAD_CRC |
@@ -2442,18 +2722,30 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			L2_FHDR_ERRORS_TOO_SHORT |
 			L2_FHDR_ERRORS_GIANT_FRAME)) {
 
-			goto reuse_rx;
+			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+					  sw_ring_prod);
+			goto next_rx;
+		}
+		hdr_len = 0;
+		if (status & L2_FHDR_STATUS_SPLIT) {
+			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
+			pg_ring_used = 1;
+		} else if (len > bp->rx_jumbo_thresh) {
+			hdr_len = bp->rx_jumbo_thresh;
+			pg_ring_used = 1;
 		}
 
-		/* Since we don't have a jumbo ring, copy small packets
-		 * if mtu > 1500
-		 */
-		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
+		len -= 4;
+
+		if (len <= bp->rx_copy_thresh) {
 			struct sk_buff *new_skb;
 
 			new_skb = netdev_alloc_skb(bp->dev, len + 2);
-			if (new_skb == NULL)
-				goto reuse_rx;
+			if (new_skb == NULL) {
+				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
+						  sw_ring_prod);
+				goto next_rx;
+			}
 
 			/* aligned copy */
 			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
@@ -2461,24 +2753,13 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
 
-			bnx2_reuse_rx_skb(bp, skb,
+			bnx2_reuse_rx_skb(bp, bnapi, skb,
 				sw_ring_cons, sw_ring_prod);
 
 			skb = new_skb;
-		}
-		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
-			pci_unmap_single(bp->pdev, dma_addr,
-				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
-
-			skb_reserve(skb, bp->rx_offset);
-			skb_put(skb, len);
-		}
-		else {
-reuse_rx:
-			bnx2_reuse_rx_skb(bp, skb,
-				sw_ring_cons, sw_ring_prod);
+		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
+			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
 			goto next_rx;
-		}
 
 		skb->protocol = eth_type_trans(skb, bp->dev);
@@ -2501,7 +2782,7 @@ reuse_rx:
 		}
 
 #ifdef BCM_VLAN
-		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
+		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
 			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
 				rx_hdr->l2_fhdr_vlan_tag);
 		}
@@ -2521,16 +2802,20 @@ next_rx:
 
 		/* Refresh hw_cons to see if there is new work */
 		if (sw_cons == hw_cons) {
-			hw_cons = bnx2_get_hw_rx_cons(bp);
+			hw_cons = bnx2_get_hw_rx_cons(bnapi);
 			rmb();
 		}
 	}
-	bp->rx_cons = sw_cons;
-	bp->rx_prod = sw_prod;
+	bnapi->rx_cons = sw_cons;
+	bnapi->rx_prod = sw_prod;
+
+	if (pg_ring_used)
+		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
+			 bnapi->rx_pg_prod);
 
 	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
 
-	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
+	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
 
 	mmiowb();
@@ -2546,8 +2831,9 @@ bnx2_msi(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 
-	prefetch(bp->status_blk);
+	prefetch(bnapi->status_blk);
 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -2556,7 +2842,7 @@ bnx2_msi(int irq, void *dev_instance)
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;
 
-	netif_rx_schedule(dev, &bp->napi);
+	netif_rx_schedule(dev, &bnapi->napi);
 
 	return IRQ_HANDLED;
 }
@@ -2566,14 +2852,15 @@ bnx2_msi_1shot(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 
-	prefetch(bp->status_blk);
+	prefetch(bnapi->status_blk);
 
 	/* Return here if interrupt is disabled. */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;
 
-	netif_rx_schedule(dev, &bp->napi);
+	netif_rx_schedule(dev, &bnapi->napi);
 
 	return IRQ_HANDLED;
 }
@@ -2583,7 +2870,8 @@ bnx2_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *de