Diffstat (limited to 'drivers/net/ethernet/broadcom/tg3.c')
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 743
1 file changed, 433 insertions(+), 310 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf4074167d6..a1f2e0fed78 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			121
+#define TG3_MIN_NUM			122
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"November 2, 2011"
+#define DRV_MODULE_RELDATE	"December 7, 2011"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 	 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
 	  TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
-#define TG3_RSS_INDIR_TBL_SIZE		128
 
 /* Do not place this n-ring entries value into the tp struct itself,
  * we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 #if (NET_IP_ALIGN != 0)
 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
 #else
-#define TG3_RX_OFFSET(tp)	0
+#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
 #endif
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX		4096
+#define TG3_TX_BD_DMA_MAX_2K		2048
+#define TG3_TX_BD_DMA_MAX_4K		4096
 
 #define TG3_RAW_IP_ALIGN 2
 
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
 	}
 }
 
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
-	u16 miireg;
-
-	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
-		miireg = ADVERTISE_PAUSE_CAP;
-	else if (flow_ctrl & FLOW_CTRL_TX)
-		miireg = ADVERTISE_PAUSE_ASYM;
-	else if (flow_ctrl & FLOW_CTRL_RX)
-		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-	else
-		miireg = 0;
-
-	return miireg;
-}
-
 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 {
 	u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 {
 	u8 cap = 0;
 
-	if (lcladv & ADVERTISE_1000XPAUSE) {
-		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-			if (rmtadv & LPA_1000XPAUSE)
-				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-			else if (rmtadv & LPA_1000XPAUSE_ASYM)
-				cap = FLOW_CTRL_RX;
-		} else {
-			if (rmtadv & LPA_1000XPAUSE)
-				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
-		}
-	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
-		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+		if (lcladv & ADVERTISE_1000XPAUSE)
+			cap = FLOW_CTRL_RX;
+		if (rmtadv & ADVERTISE_1000XPAUSE)
 			cap = FLOW_CTRL_TX;
 	}
 
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
 		if (phydev->duplex == DUPLEX_HALF)
 			mac_mode |= MAC_MODE_HALF_DUPLEX;
 		else {
-			lcl_adv = tg3_advert_flowctrl_1000T(
+			lcl_adv = mii_advertise_flowctrl(
 				  tp->link_config.flowctrl);
 
 			if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
 	if (tp->link_config.active_speed == SPEED_1000 &&
 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
-	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+	     tg3_flag(tp, 57765_CLASS)) &&
 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 		val = MII_TG3_DSP_TAP26_ALNOKO |
 		      MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
 	bool need_vaux = false;
 
 	/* The GPIOs do something completely different on 57765. */
-	if (!tg3_flag(tp, IS_NIC) ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
 		return;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
 	u32 val, new_adv;
 
 	new_adv = ADVERTISE_CSMA;
-	if (advertise & ADVERTISED_10baseT_Half)
-		new_adv |= ADVERTISE_10HALF;
-	if (advertise & ADVERTISED_10baseT_Full)
-		new_adv |= ADVERTISE_10FULL;
-	if (advertise & ADVERTISED_100baseT_Half)
-		new_adv |= ADVERTISE_100HALF;
-	if (advertise & ADVERTISED_100baseT_Full)
-		new_adv |= ADVERTISE_100FULL;
-
-	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+	new_adv |= mii_advertise_flowctrl(flowctrl);
 
 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
 	if (err)
 		goto done;
 
-	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
-		goto done;
-
-	new_adv = 0;
-	if (advertise & ADVERTISED_1000baseT_Half)
-		new_adv |= ADVERTISE_1000HALF;
-	if (advertise & ADVERTISED_1000baseT_Full)
-		new_adv |= ADVERTISE_1000FULL;
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 
-	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
-	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
-		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 
-	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
-	if (err)
-		goto done;
+		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+		if (err)
+			goto done;
+	}
 
 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 		goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
 		case ASIC_REV_5717:
 		case ASIC_REV_57765:
+		case ASIC_REV_57766:
 		case ASIC_REV_5719:
 			/* If we advertised any eee advertisements above... */
 			if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
 	return err;
 }
 
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 {
-	u32 adv_reg, all_mask = 0;
+	u32 advmsk, tgtadv, advertising;
 
-	if (mask & ADVERTISED_10baseT_Half)
-		all_mask |= ADVERTISE_10HALF;
-	if (mask & ADVERTISED_10baseT_Full)
-		all_mask |= ADVERTISE_10FULL;
-	if (mask & ADVERTISED_100baseT_Half)
-		all_mask |= ADVERTISE_100HALF;
-	if (mask & ADVERTISED_100baseT_Full)
-		all_mask |= ADVERTISE_100FULL;
+	advertising = tp->link_config.advertising;
+	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 
-	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
-		return 0;
+	advmsk = ADVERTISE_ALL;
+	if (tp->link_config.active_duplex == DUPLEX_FULL) {
+		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+	}
 
-	if ((adv_reg & ADVERTISE_ALL) != all_mask)
-		return 0;
+	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+		return false;
+
+	if ((*lcladv & advmsk) != tgtadv)
+		return false;
 
 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 		u32 tg3_ctrl;
 
-		all_mask = 0;
-		if (mask & ADVERTISED_1000baseT_Half)
-			all_mask |= ADVERTISE_1000HALF;
-		if (mask & ADVERTISED_1000baseT_Full)
-			all_mask |= ADVERTISE_1000FULL;
+		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 
 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
-			return 0;
+			return false;
 
 		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
-		if (tg3_ctrl != all_mask)
-			return 0;
+		if (tg3_ctrl != tgtadv)
+			return false;
 	}
 
-	return 1;
+	return true;
 }
 
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 {
-	u32 curadv, reqadv;
-
-	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
-		return 1;
+	u32 lpeth = 0;
 
-	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		u32 val;
 
-	if (tp->link_config.active_duplex == DUPLEX_FULL) {
-		if (curadv != reqadv)
-			return 0;
+		if (tg3_readphy(tp, MII_STAT1000, &val))
+			return false;
 
-		if (tg3_flag(tp, PAUSE_AUTONEG))
-			tg3_readphy(tp, MII_LPA, rmtadv);
-	} else {
-		/* Reprogram the advertisement register, even if it
-		 * does not affect the current link.  If the link
-		 * gets renegotiated in the future, we can save an
-		 * additional renegotiation cycle by advertising
-		 * it correctly in the first place.
-		 */
-		if (curadv != reqadv) {
-			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
-				     ADVERTISE_PAUSE_ASYM);
-			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
-		}
+		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
 	}
 
-	return 1;
+	if (tg3_readphy(tp, MII_LPA, rmtadv))
+		return false;
+
+	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+	tp->link_config.rmt_adv = lpeth;
+
+	return true;
 }
 
 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 	current_link_up = 0;
 	current_speed = SPEED_INVALID;
 	current_duplex = DUPLEX_INVALID;
+	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+	tp->link_config.rmt_adv = 0;
 
 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 		err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 
 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 			if ((bmcr & BMCR_ANENABLE) &&
-			    tg3_copper_is_advertising_all(tp,
-						tp->link_config.advertising)) {
-				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
-							      &rmt_adv))
-					current_link_up = 1;
-			}
+			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+				current_link_up = 1;
 		} else {
 			if (!(bmcr & BMCR_ANENABLE) &&
 			    tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 		}
 
 		if (current_link_up == 1 &&
-		    tp->link_config.active_duplex == DUPLEX_FULL)
+		    tp->link_config.active_duplex == DUPLEX_FULL) {
+			u32 reg, bit;
+
+			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+				reg = MII_TG3_FET_GEN_STAT;
+				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+			} else {
+				reg = MII_TG3_EXT_STAT;
+				bit = MII_TG3_EXT_STAT_MDIX;
+			}
+
+			if (!tg3_readphy(tp, reg, &val) && (val & bit))
+				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+		}
 	}
 
 relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
 				remote_adv |= LPA_1000XPAUSE_ASYM;
 
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+
 			tg3_setup_flow_control(tp, local_adv, remote_adv);
 			current_link_up = 1;
 			tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
 				remote_adv |= LPA_1000XPAUSE_ASYM;
 
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
+
 			tg3_setup_flow_control(tp, local_adv, remote_adv);
 
 			current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
 	udelay(40);
 
 	current_link_up = 0;
+	tp->link_config.rmt_adv = 0;
 	mac_status = tr32(MAC_STATUS);
 
 	if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 	current_link_up = 0;
 	current_speed = SPEED_INVALID;
 	current_duplex = DUPLEX_INVALID;
+	tp->link_config.rmt_adv = 0;
 
 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 		/* do nothing, just check for link up at the end */
 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
-		u32 adv, new_adv;
+		u32 adv, newadv;
 
 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
-		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
-				  ADVERTISE_1000XPAUSE |
-				  ADVERTISE_1000XPSE_ASYM |
-				  ADVERTISE_SLCT);
-
-		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+				 ADVERTISE_1000XPAUSE |
+				 ADVERTISE_1000XPSE_ASYM |
+				 ADVERTISE_SLCT);
 
-		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
-			new_adv |= ADVERTISE_1000XHALF;
-		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
-			new_adv |= ADVERTISE_1000XFULL;
+		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 
-		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
-			tg3_writephy(tp, MII_ADVERTISE, new_adv);
+		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+			tg3_writephy(tp, MII_ADVERTISE, newadv);
 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 			tg3_writephy(tp, MII_BMCR, bmcr);
 
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 				current_duplex = DUPLEX_FULL;
 			else
 				current_duplex = DUPLEX_HALF;
+
+			tp->link_config.rmt_adv =
+					   mii_adv_to_ethtool_adv_x(remote_adv);
 		} else if (!tg3_flag(tp, 5780_CLASS)) {
 			/* Link is up via parallel detect */
 		} else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	u32 sw_idx = tnapi->tx_cons;
 	struct netdev_queue *txq;
 	int index = tnapi - tp->napi;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
 
 	if (tg3_flag(tp, ENABLE_TSS))
 		index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
 			sw_idx = NEXT_TX(sw_idx);
 		}
 
+		pkts_compl++;
+		bytes_compl += skb->len;
+
 		dev_kfree_skb(skb);
 
 		if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
 		}
 	}
 
+	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+
 	tnapi->tx_cons = sw_idx;
 
 	/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	}
 }
 
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 {
-	if (!ri->skb)
+	if (!ri->data)
 		return;
 
 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 			 map_sz, PCI_DMA_FROMDEVICE);
-	dev_kfree_skb_any(ri->skb);
-	ri->skb = NULL;
+	kfree(ri->data);
+	ri->data = NULL;
 }
 
 /* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 			    u32 opaque_key, u32 dest_idx_unmasked)
 {
 	struct tg3_rx_buffer_desc *desc;
 	struct ring_info *map;
-	struct sk_buff *skb;
+	u8 *data;
 	dma_addr_t mapping;
-	int skb_size, dest_idx;
+	int skb_size, data_size, dest_idx;
 
 	switch (opaque_key) {
 	case RXD_OPAQUE_RING_STD:
 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 		desc = &tpr->rx_std[dest_idx];
 		map = &tpr->rx_std_buffers[dest_idx];
-		skb_size = tp->rx_pkt_map_sz;
+		data_size = tp->rx_pkt_map_sz;
 		break;
 
 	case RXD_OPAQUE_RING_JUMBO:
 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 		desc = &tpr->rx_jmb[dest_idx].std;
 		map = &tpr->rx_jmb_buffers[dest_idx];
-		skb_size = TG3_RX_JMB_MAP_SZ;
+		data_size = TG3_RX_JMB_MAP_SZ;
 		break;
 
 	default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 	 * Callers depend upon this behavior and assume that
 	 * we leave everything unchanged if we fail.
 	 */
-	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
-	if (skb == NULL)
+	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	data = kmalloc(skb_size, GFP_ATOMIC);
+	if (!data)
 		return -ENOMEM;
 
-	skb_reserve(skb, TG3_RX_OFFSET(tp));
-
-	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+	mapping = pci_map_single(tp->pdev,
+				 data + TG3_RX_OFFSET(tp),
+				 data_size,
 				 PCI_DMA_FROMDEVICE);
 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
-		dev_kfree_skb(skb);
+		kfree(data);
 		return -EIO;
 	}
 
-	map->skb = skb;
+	map->data = data;
 	dma_unmap_addr_set(map, mapping, mapping);
 
 	desc->addr_hi = ((u64)mapping >> 32);
 	desc->addr_lo = ((u64)mapping & 0xffffffff);
 
-	return skb_size;
+	return data_size;
 }
 
 /* We only need to move over in the address because the other
  * members of the RX descriptor are invariant.  See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
  */
 static void tg3_recycle_rx(struct tg3_napi *tnapi,
 			   struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 		return;
 	}
 
-	dest_map->skb = src_map->skb;
+	dest_map->data = src_map->data;
 	dma_unmap_addr_set(dest_map, mapping,
 			   dma_unmap_addr(src_map, mapping));
 	dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
 	 */
 	smp_wmb();
 
-	src_map->skb = NULL;
+	src_map->data = NULL;
 }
 
 /* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u32 opaque_key, desc_idx, *post_ptr;
+		u8 *data;
 
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 		if (opaque_key == RXD_OPAQUE_RING_STD) {
 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
 			dma_addr = dma_unmap_addr(ri, mapping);
-			skb = ri->skb;
+			data = ri->data;
 			post_ptr = &std_prod_idx;
 			rx_std_posted++;
 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
 			dma_addr = dma_unmap_addr(ri, mapping);
-			skb = ri->skb;
+			data = ri->data;
 			post_ptr = &jmb_prod_idx;
 		} else
 			goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 			goto next_pkt;
 		}
 
+		prefetch(data + TG3_RX_OFFSET(tp));
 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 		      ETH_FCS_LEN;
 
 		if (len > TG3_RX_COPY_THRESH(tp)) {
 			int skb_size;
 
-			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
 						    *post_ptr);
 			if (skb_size < 0)
 				goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 					 PCI_DMA_FROMDEVICE);
 
-			/* Ensure that the update to the skb happens
+			skb = build_skb(data);
+			if (!skb) {
+				kfree(data);
+				goto drop_it_no_recycle;
+			}
+			skb_reserve(skb, TG3_RX_OFFSET(tp));
+			/* Ensure that the update to the data happens
 			 * after the usage of the old DMA mapping.
 			 */
 			smp_wmb();
 
-			ri->skb = NULL;
+			ri->data = NULL;
 
-			skb_put(skb, len);
 		} else {
-			struct sk_buff *copy_skb;
-
 			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
-			copy_skb = netdev_alloc_skb(tp->dev, len +
-						    TG3_RAW_IP_ALIGN);
-			if (copy_skb == NULL)
+			skb = netdev_alloc_skb(tp->dev,
+					       len + TG3_RAW_IP_ALIGN);
+			if (skb == NULL)
 				goto drop_it_no_recycle;
 
-			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
-			skb_put(copy_skb, len);
+			skb_reserve(skb, TG3_RAW_IP_ALIGN);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
 						    PCI_DMA_FROMDEVICE);
-			skb_copy_from_linear_data(skb, copy_skb->data, len);
+			memcpy(skb->data,
+			       data + TG3_RX_OFFSET(tp),
+			       len);
 			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
 						       len, PCI_DMA_FROMDEVICE);
-
-			/* We'll reuse the original ring buffer. */
-			skb = copy_skb;
 		}
 
+		skb_put(skb, len);
 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		di = dpr->rx_std_prod_idx;
 
 		for (i = di; i < di + cpycnt; i++) {
-			if (dpr->rx_std_buffers[i].skb) {
+			if (dpr->rx_std_buffers[i].data) {
 				cpycnt = i - di;
 				err = -ENOSPC;
 				break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
 		di = dpr->rx_jmb_prod_idx;
 
 		for (i = di; i < di + cpycnt; i++) {
-			if (dpr->rx_jmb_buffers[i].skb) {
+			if (dpr->rx_jmb_buffers[i].data) {
 				cpycnt = i - di;
 				err = -ENOSPC;
 				break;
@@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 	bool hwbug = false;
 
 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
-		hwbug = 1;
+		hwbug = true;
 
 	if (tg3_4g_overflow_test(map, len))
-		hwbug = 1;
+		hwbug = true;
 
 	if (tg3_40bit_overflow_test(tp, map, len))
-		hwbug = 1;
+		hwbug = true;
 
-	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+	if (tp->dma_limit) {
 		u32 prvidx = *entry;
 		u32 tmp_flag = flags & ~TXD_FLAG_END;
-		while (len > TG3_TX_BD_DMA_MAX && *budget) {
-			u32 frag_len = TG3_TX_BD_DMA_MAX;
-			len -= TG3_TX_BD_DMA_MAX;
+		while (len > tp->dma_limit && *budget) {
+			u32 frag_len = tp->dma_limit;
+			len -= tp->dma_limit;
 
 			/* Avoid the 8byte DMA problem */
 			if (len <= 8) {
-				len += TG3_TX_BD_DMA_MAX / 2;
-				frag_len = TG3_TX_BD_DMA_MAX / 2;
+				len += tp->dma_limit / 2;
+				frag_len = tp->dma_limit / 2;
 			}
 
 			tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 			*budget -= 1;
 			*entry = NEXT_TX(*entry);
 		} else {
-			hwbug = 1;
+			hwbug = true;
 			tnapi->tx_buffers[prvidx].fragmented = false;
 		}
 	}
@@ -6685,14 +6667,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		iph = ip_hdr(skb);
 		tcp_opt_len = tcp_optlen(skb);
 
-		if (skb_is_gso_v6(skb)) {
-			hdr_len = skb_headlen(skb) - ETH_HLEN;
-		} else {
-			u32 ip_tcp_len;
-
-			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-			hdr_len = ip_tcp_len + tcp_opt_len;
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+		if (!skb_is_gso_v6(skb)) {
 			iph->check = 0;
 			iph->tot_len = htons(mss + hdr_len);
 		}
@@ -6816,6 +6793,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	skb_tx_timestamp(skb);
+	netdev_sent_queue(tp->dev, skb->len);
 
 	/* Packets are ready, update Tx producer idx local and on card. */
 	tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6968,7 +6946,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
 	return 0;
 }
 
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
@@ -6994,7 +6972,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
 	}
 }
 
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+	netdev_features_t features)
 {
 	struct tg3 *tp = netdev_priv(dev);
 
@@ -7004,9 +6983,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 {
-	u32 changed = dev->features ^ features;
+	netdev_features_t changed = dev->features ^ features;
 
 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
 		tg3_set_loopback(dev, features);
@@ -7082,14 +7061,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 	if (tpr != &tp->napi[0].prodring) {
 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
 		     i = (i + 1) & tp->rx_std_ring_mask)
-			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 					tp->rx_pkt_map_sz);
 
 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
 			for (i = tpr->rx_jmb_cons_idx;
 			     i != tpr->rx_jmb_prod_idx;
 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
-				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 						TG3_RX_JMB_MAP_SZ);
 			}
 		}
@@ -7098,12 +7077,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
 	}
 
 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
-		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 				tp->rx_pkt_map_sz);
 
 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
-			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 					TG3_RX_JMB_MAP_SZ);
 	}
 }
@@ -7159,7 +7138,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
 	/* Now allocate fresh SKBs for each rx ring. */
 	for (i = 0; i < tp->rx_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX standard ring. Only "
 				    "%d out of %d buffers were allocated "
@@ -7191,7 +7170,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 	}
 
 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
-		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
 			netdev_warn(tp->dev,
 				    "Using a smaller RX jumbo ring. Only %d "
 				    "out of %d buffers were allocated "
@@ -7297,6 +7276,7 @@ static void tg3_free_rings(struct tg3 *tp)
 			dev_kfree_skb_any(skb);
 		}
 	}
+	netdev_reset_queue(tp->dev);
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -7591,8 +7571,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
 		if (tnapi->hw_status)
 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 	}
-	if (tp->hw_stats)
-		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 
 	return err;
 }
@@ -7626,15 +7604,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
 
 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
-		if (tg3_flag(tp, PCI_EXPRESS))
-			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-		else {
-			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
-					      tp->pci_cacheline_sz);
-			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
-					      tp->pci_lat_timer);
-		}
+	if (!tg3_flag(tp, PCI_EXPRESS)) {
+		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+				      tp->pci_cacheline_sz);
+		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+				      tp->pci_lat_timer);
 	}
 
 	/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7793,6 @@ static int tg3_chip_reset(struct tg3 *tp)
 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
 				      val16);
 
-		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
 		/* Clear error status */
 		pci_write_config_word(tp->pdev,
 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7886,11 @@ static int tg3_chip_reset(struct tg3 *tp)
 	return 0;
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+						 struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
+						struct tg3_ethtool_stats *);
+
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
 {
@@ -7931,6 +7908,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
 	tg3_write_sig_legacy(tp, kind);
 	tg3_write_sig_post_reset(tp, kind);
 
+	if (tp->hw_stats) {
+		/* Save the stats across chip resets... */
+		tg3_get_stats64(tp->dev, &tp->net_stats_prev),
+		tg3_get_estats(tp, &tp->estats_prev);
+
+		/* And make sure the next sample is new data */
+		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+	}
+
 	if (err)
 		return err;
 
@@ -8074,7 +8060,7 @@ static void tg3_rings_reset(struct tg3 *tp)
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
 	else if (tg3_flag(tp, 5717_PLUS))
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
-	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+	else if (tg3_flag(tp, 57765_CLASS))
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
 	else
 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8077,7 @@ static void tg3_rings_reset(struct tg3 *tp)
 	else if (!tg3_flag(tp, 5705_PLUS))
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
-		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		 tg3_flag(tp, 57765_CLASS))
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
 	else
 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8183,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 	if (!tg3_flag(tp, 5750_PLUS) ||
 	    tg3_flag(tp, 5780_CLASS) ||
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    tg3_flag(tp, 57765_PLUS))
 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8204,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 		return;
 
-	if (!tg3_flag(tp, 5705_PLUS))
-		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
-	else
-		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 
 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 
@@ -8231,6 +8215,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 }
 
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+		tp->rss_ind_tbl[i] =
+			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+	int i;
+
+	if (!tg3_flag(tp, SUPPORT_MSIX))
+		return;
+
+	if (tp->irq_cnt <= 2) {
+		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+		return;
+	}
+
+	/* Validate table against current IRQ count */
+	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+			break;
+	}
+
+	if (i != TG3_RSS_INDIR_TBL_SIZE)
+		tg3_rss_init_dflt_indir_tbl(tp);
+}
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+	int i = 0;
+	u32 reg = MAC_RSS_INDIR_TBL_0;
+
+	while (i < TG3_RSS_INDIR_TBL_SIZE) {
+		u32 val = tp->rss_ind_tbl[i];
+		i++;
+		for (; i % 8; i++) {
+			val <<= 4;
+			val |= tp->rss_ind_tbl[i];
+		}
+		tw32(reg, val);
+		reg += 4;
+	}
+}
+
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 {
@@ -8337,7 +8369,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(GRC_MODE, grc_mode);
 	}
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+	if (tg3_flag(tp, 57765_CLASS)) {
 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 			u32 grc_mode = tr32(GRC_MODE);
 
@@ -8425,7 +8457,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 			~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+		if (!tg3_flag(tp, 57765_CLASS) &&
 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8604,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 		tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 		     val | BDINFO_FLAGS_USE_EXT_RECV);
 		if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
-		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		    tg3_flag(tp, 57765_CLASS))
 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
 			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 	} else {
@@ -8581,10 +8613,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	}
 
 	if (tg3_flag(tp, 57765_PLUS)) {
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
-			val = TG3_RX_STD_MAX_SIZE_5700;
-		else
-			val = TG3_RX_STD_MAX_SIZE_5717;
+		val = TG3_RX_STD_RING_SIZE(tp);
 		val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
 		val |= (TG3_RX_STD_DMA_SZ << 2);
 	} else
@@ -8661,6 +8690,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	if (tg3_flag(tp, PCI_EXPRESS))
 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+		rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
 	if (tg3_flag(tp, HW_TSO_1) ||
 	    tg3_flag(tp, HW_TSO_2) ||
 	    tg3_flag(tp, HW_TSO_3))
@@ -8809,9 +8841,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 	udelay(100);
 
-	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+	if (tg3_flag(tp, USING_MSIX)) {
 		val = tr32(MSGINT_MODE);
-		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+		val |= MSGINT_MODE_ENABLE;
+		if (tp->irq_cnt > 1)
+			val |= MSGINT_MODE_MULTIVEC_EN;
 		if (!tg3_flag(tp, 1SHOT_MSI))
 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
 		tw32(MSGINT_MODE, val);
@@ -8924,28 +8958,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 	udelay(100);
 
 	if (tg3_flag(tp, ENABLE_RSS)) {
-		int i = 0;
-		u32 reg = MAC_RSS_INDIR_TBL_0;
-
-		if (tp->irq_cnt == 2) {
-			for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
-				tw32(reg, 0x0);
-				reg += 4;
-			}
-		} else {
-			u32 val;
-
-			while (i < TG3_RSS_INDIR_TBL_SIZE) {
-				val = i % (tp->irq_cnt - 1);
-				i++;
-				for (; i % 8; i++) {
-					val <<= 4;
-					val |= (i % (tp->irq_cnt - 1));
-				}
-				tw32(reg, val);
-				reg += 4;
-			}
-		}
+		tg3_rss_write_indir_tbl(tp);
 
 		/* Setu
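For reference, the rework above replaces the open-coded RSS loop in tg3_reset_hw() with tg3_rss_write_indir_tbl(), which packs eight 4-bit RX-queue indices into each 32-bit MAC_RSS_INDIR_TBL register, with the first table entry landing in the most-significant nibble. A minimal standalone sketch of that packing scheme, assuming a 128-entry table and four RSS return rings (constant and variable names below are illustrative where they are not taken from the patch):

	/* Sketch of the nibble packing done by tg3_rss_write_indir_tbl(). */
	#include <stdint.h>
	#include <stdio.h>

	#define TBL_SIZE 128			/* TG3_RSS_INDIR_TBL_SIZE */

	int main(void)
	{
		uint8_t tbl[TBL_SIZE];
		uint32_t nq = 4;		/* pretend 4 RSS return rings */
		int i;

		/* Round-robin default fill, like ethtool_rxfh_indir_default() */
		for (i = 0; i < TBL_SIZE; i++)
			tbl[i] = i % nq;

		/* Pack eight entries per register, mirroring the patch's loop */
		i = 0;
		while (i < TBL_SIZE) {
			uint32_t val = tbl[i];
			i++;
			for (; i % 8; i++) {
				val <<= 4;
				val |= tbl[i];
			}
			printf("MAC_RSS_INDIR_TBL_0 + %3d = 0x%08x\n",
			       (i / 8 - 1) * 4, val);
		}
		return 0;
	}

With the four-ring fill this prints sixteen register values of 0x01230123 each, which is what the new helper would hand to tw32() for the default indirection table.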