Diffstat (limited to 'drivers/net/ethernet/broadcom')
45 files changed, 10218 insertions, 4155 deletions
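Before the per-file hunks: one recurring change in the b44.c hunks below is the driver adopting byte queue limits (BQL) accounting — netdev_sent_queue() when a packet is handed to the hardware, netdev_completed_queue() in the TX completion path, and netdev_reset_queue() when the ring is re-initialised. A minimal sketch of that accounting pattern follows; the foo_* function names and the elided ring handling are hypothetical, only the netdev_*_queue() calls reflect the real <linux/netdevice.h> API used in the diff.

/*
 * Sketch of the BQL accounting pattern adopted by b44 below.
 * foo_* names are hypothetical; only netdev_sent_queue(),
 * netdev_completed_queue() and netdev_reset_queue() are the
 * actual kernel API calls added by the patch.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and post it to the hardware TX ring ... */

	/* account the bytes handed to the hardware */
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* ... walk completed descriptors, free skbs,
	 * accumulate pkts_compl and bytes_compl ...
	 */

	/* report completed work so BQL can size the queue */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

static void foo_ring_reinit(struct net_device *dev)
{
	/* after a ring reset, drop any stale BQL state */
	netdev_reset_queue(dev);
}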
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 2fa5b86f139..3e488094b07 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -23,6 +23,7 @@ config B44  	depends on SSB_POSSIBLE && HAS_DMA  	select SSB  	select MII +	select PHYLIB  	---help---  	  If you have a network (Ethernet) controller of this type, say Y  	  or M and read the Ethernet-HOWTO, available from @@ -59,6 +60,17 @@ config BCM63XX_ENET  	  This driver supports the ethernet MACs in the Broadcom 63xx  	  MIPS chipset family (BCM63XX). +config BCMGENET +	tristate "Broadcom GENET internal MAC support" +	depends on OF +	select MII +	select PHYLIB +	select FIXED_PHY if BCMGENET=y +	select BCM7XXX_PHY +	help +	  This driver supports the built-in Ethernet MACs found in the +	  Broadcom BCM7xxx Set Top Box family chipset. +  config BNX2  	tristate "Broadcom NetXtremeII support"  	depends on PCI @@ -138,4 +150,15 @@ config BGMAC  	  In case of using this driver on BCM4706 it's also requires to enable  	  BCMA_DRIVER_GMAC_CMN to make it work. +config SYSTEMPORT +	tristate "Broadcom SYSTEMPORT internal MAC support" +	depends on OF +	select MII +	select PHYLIB +	select FIXED_PHY if SYSTEMPORT=y +	help +	  This driver supports the built-in Ethernet MACs found in the +	  Broadcom BCM7xxx Set Top Box family chipset using an internal +	  Ethernet switch. +  endif # NET_VENDOR_BROADCOM diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 68efa1a3fb8..e2a958a657e 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -4,9 +4,11 @@  obj-$(CONFIG_B44) += b44.o  obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o +obj-$(CONFIG_BCMGENET) += genet/  obj-$(CONFIG_BNX2) += bnx2.o  obj-$(CONFIG_CNIC) += cnic.o  obj-$(CONFIG_BNX2X) += bnx2x/  obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o  obj-$(CONFIG_TIGON3) += tg3.o  obj-$(CONFIG_BGMAC) += bgmac.o +obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 9b017d9c58e..ca5a20a48b1 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -6,6 +6,7 @@   * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)   * Copyright (C) 2006 Broadcom Corporation.   * Copyright (C) 2007 Michael Buesch <m@bues.ch> + * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>   *   * Distribute under GPL.   
*/ @@ -29,6 +30,7 @@  #include <linux/dma-mapping.h>  #include <linux/ssb/ssb.h>  #include <linux/slab.h> +#include <linux/phy.h>  #include <asm/uaccess.h>  #include <asm/io.h> @@ -284,7 +286,7 @@ static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)  static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)  { -	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) +	if (bp->flags & B44_FLAG_EXTERNAL_PHY)  		return 0;  	return __b44_readphy(bp, bp->phy_addr, reg, val); @@ -292,14 +294,14 @@ static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)  static inline int b44_writephy(struct b44 *bp, int reg, u32 val)  { -	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) +	if (bp->flags & B44_FLAG_EXTERNAL_PHY)  		return 0;  	return __b44_writephy(bp, bp->phy_addr, reg, val);  }  /* miilib interface */ -static int b44_mii_read(struct net_device *dev, int phy_id, int location) +static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)  {  	u32 val;  	struct b44 *bp = netdev_priv(dev); @@ -309,19 +311,36 @@ static int b44_mii_read(struct net_device *dev, int phy_id, int location)  	return val;  } -static void b44_mii_write(struct net_device *dev, int phy_id, int location, -			 int val) +static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location, +			       int val)  {  	struct b44 *bp = netdev_priv(dev);  	__b44_writephy(bp, phy_id, location, val);  } +static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location) +{ +	u32 val; +	struct b44 *bp = bus->priv; +	int rc = __b44_readphy(bp, phy_id, location, &val); +	if (rc) +		return 0xffffffff; +	return val; +} + +static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location, +				 u16 val) +{ +	struct b44 *bp = bus->priv; +	return __b44_writephy(bp, phy_id, location, val); +} +  static int b44_phy_reset(struct b44 *bp)  {  	u32 val;  	int err; -	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) +	if (bp->flags & B44_FLAG_EXTERNAL_PHY)  		return 0;  	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);  	if (err) @@ -423,7 +442,7 @@ static int b44_setup_phy(struct b44 *bp)  	b44_wap54g10_workaround(bp); -	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) +	if (bp->flags & B44_FLAG_EXTERNAL_PHY)  		return 0;  	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)  		goto out; @@ -521,12 +540,14 @@ static void b44_check_phy(struct b44 *bp)  {  	u32 bmsr, aux; -	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {  		bp->flags |= B44_FLAG_100_BASE_T; -		bp->flags |= B44_FLAG_FULL_DUPLEX;  		if (!netif_carrier_ok(bp->dev)) {  			u32 val = br32(bp, B44_TX_CTRL); -			val |= TX_CTRL_DUPLEX; +			if (bp->flags & B44_FLAG_FULL_DUPLEX) +				val |= TX_CTRL_DUPLEX; +			else +				val &= ~TX_CTRL_DUPLEX;  			bw32(bp, B44_TX_CTRL, val);  			netif_carrier_on(bp->dev);  			b44_link_report(bp); @@ -596,6 +617,7 @@ static void b44_timer(unsigned long __opaque)  static void b44_tx(struct b44 *bp)  {  	u32 cur, cons; +	unsigned bytes_compl = 0, pkts_compl = 0;  	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;  	cur /= sizeof(struct dma_desc); @@ -612,9 +634,14 @@ static void b44_tx(struct b44 *bp)  				 skb->len,  				 DMA_TO_DEVICE);  		rp->skb = NULL; + +		bytes_compl += skb->len; +		pkts_compl++; +  		dev_kfree_skb_irq(skb);  	} +	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);  	bp->tx_cons = cons;  	if (netif_queue_stopped(bp->dev) &&  	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH) @@ -1018,6 +1045,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff 
*skb, struct net_device *dev)  	if (bp->flags & B44_FLAG_REORDER_BUG)  		br32(bp, B44_DMATX_PTR); +	netdev_sent_queue(dev, skb->len); +  	if (TX_BUFFS_AVAIL(bp) < 1)  		netif_stop_queue(dev); @@ -1307,7 +1336,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)  	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {  		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);  		br32(bp, B44_ENET_CTRL); -		bp->flags &= ~B44_FLAG_INTERNAL_PHY; +		bp->flags |= B44_FLAG_EXTERNAL_PHY;  	} else {  		u32 val = br32(bp, B44_DEVCTRL); @@ -1316,7 +1345,7 @@ static void b44_chip_reset(struct b44 *bp, int reset_kind)  			br32(bp, B44_DEVCTRL);  			udelay(100);  		} -		bp->flags |= B44_FLAG_INTERNAL_PHY; +		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;  	}  } @@ -1331,7 +1360,10 @@ static void b44_halt(struct b44 *bp)  	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);  	/* now reset the chip, but without enabling the MAC&PHY  	 * part of it. This has to be done _after_ we shut down the PHY */ -	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL); +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) +		b44_chip_reset(bp, B44_CHIP_RESET_FULL); +	else +		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);  }  /* bp->lock is held. */ @@ -1416,6 +1448,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)  	val = br32(bp, B44_ENET_CTRL);  	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE)); + +	netdev_reset_queue(bp->dev);  }  static int b44_open(struct net_device *dev) @@ -1450,6 +1484,10 @@ static int b44_open(struct net_device *dev)  	add_timer(&bp->timer);  	b44_enable_ints(bp); + +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) +		phy_start(bp->phydev); +  	netif_start_queue(dev);  out:  	return err; @@ -1612,6 +1650,9 @@ static int b44_close(struct net_device *dev)  	netif_stop_queue(dev); +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) +		phy_stop(bp->phydev); +  	napi_disable(&bp->napi);  	del_timer_sync(&bp->timer); @@ -1644,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,  	unsigned int start;  	do { -		start = u64_stats_fetch_begin_bh(&hwstat->syncp); +		start = u64_stats_fetch_begin_irq(&hwstat->syncp);  		/* Convert HW stats into rtnl_link_stats64 stats. */  		nstat->rx_packets = hwstat->rx_pkts; @@ -1678,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,  		/* Carrier lost counter seems to be broken for some devices */  		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;  #endif -	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); +	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));  	return nstat;  } @@ -1795,6 +1836,11 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)  {  	struct b44 *bp = netdev_priv(dev); +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) { +		BUG_ON(!bp->phydev); +		return phy_ethtool_gset(bp->phydev, cmd); +	} +  	cmd->supported = (SUPPORTED_Autoneg);  	cmd->supported |= (SUPPORTED_100baseT_Half |  			  SUPPORTED_100baseT_Full | @@ -1818,8 +1864,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)  		DUPLEX_FULL : DUPLEX_HALF;  	cmd->port = 0;  	cmd->phy_address = bp->phy_addr; -	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ? -		XCVR_INTERNAL : XCVR_EXTERNAL; +	cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ? +		XCVR_EXTERNAL : XCVR_INTERNAL;  	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?  		
AUTONEG_DISABLE : AUTONEG_ENABLE;  	if (cmd->autoneg == AUTONEG_ENABLE) @@ -1836,7 +1882,23 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)  static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)  {  	struct b44 *bp = netdev_priv(dev); -	u32 speed = ethtool_cmd_speed(cmd); +	u32 speed; +	int ret; + +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) { +		BUG_ON(!bp->phydev); +		spin_lock_irq(&bp->lock); +		if (netif_running(dev)) +			b44_setup_phy(bp); + +		ret = phy_ethtool_sset(bp->phydev, cmd); + +		spin_unlock_irq(&bp->lock); + +		return ret; +	} + +	speed = ethtool_cmd_speed(cmd);  	/* We do not support gigabit. */  	if (cmd->autoneg == AUTONEG_ENABLE) { @@ -2011,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,  	do {  		data_src = &hwstat->tx_good_octets;  		data_dst = data; -		start = u64_stats_fetch_begin_bh(&hwstat->syncp); +		start = u64_stats_fetch_begin_irq(&hwstat->syncp);  		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)  			*data_dst++ = *data_src++; -	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start)); +	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));  }  static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -2066,7 +2128,6 @@ static const struct ethtool_ops b44_ethtool_ops = {  static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)  { -	struct mii_ioctl_data *data = if_mii(ifr);  	struct b44 *bp = netdev_priv(dev);  	int err = -EINVAL; @@ -2074,7 +2135,12 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)  		goto out;  	spin_lock_irq(&bp->lock); -	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL); +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) { +		BUG_ON(!bp->phydev); +		err = phy_mii_ioctl(bp->phydev, ifr, cmd); +	} else { +		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL); +	}  	spin_unlock_irq(&bp->lock);  out:  	return err; @@ -2101,7 +2167,7 @@ static int b44_get_invariants(struct b44 *bp)  	 * valid PHY address. 
*/  	bp->phy_addr &= 0x1F; -	memcpy(bp->dev->dev_addr, addr, 6); +	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);  	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){  		pr_err("Invalid MAC address found in EEPROM\n"); @@ -2136,6 +2202,146 @@ static const struct net_device_ops b44_netdev_ops = {  #endif  }; +static void b44_adjust_link(struct net_device *dev) +{ +	struct b44 *bp = netdev_priv(dev); +	struct phy_device *phydev = bp->phydev; +	bool status_changed = 0; + +	BUG_ON(!phydev); + +	if (bp->old_link != phydev->link) { +		status_changed = 1; +		bp->old_link = phydev->link; +	} + +	/* reflect duplex change */ +	if (phydev->link) { +		if ((phydev->duplex == DUPLEX_HALF) && +		    (bp->flags & B44_FLAG_FULL_DUPLEX)) { +			status_changed = 1; +			bp->flags &= ~B44_FLAG_FULL_DUPLEX; +		} else if ((phydev->duplex == DUPLEX_FULL) && +			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) { +			status_changed = 1; +			bp->flags |= B44_FLAG_FULL_DUPLEX; +		} +	} + +	if (status_changed) { +		u32 val = br32(bp, B44_TX_CTRL); +		if (bp->flags & B44_FLAG_FULL_DUPLEX) +			val |= TX_CTRL_DUPLEX; +		else +			val &= ~TX_CTRL_DUPLEX; +		bw32(bp, B44_TX_CTRL, val); +		phy_print_status(phydev); +	} +} + +static int b44_register_phy_one(struct b44 *bp) +{ +	struct mii_bus *mii_bus; +	struct ssb_device *sdev = bp->sdev; +	struct phy_device *phydev; +	char bus_id[MII_BUS_ID_SIZE + 3]; +	struct ssb_sprom *sprom = &sdev->bus->sprom; +	int err; + +	mii_bus = mdiobus_alloc(); +	if (!mii_bus) { +		dev_err(sdev->dev, "mdiobus_alloc() failed\n"); +		err = -ENOMEM; +		goto err_out; +	} + +	mii_bus->priv = bp; +	mii_bus->read = b44_mdio_read_phylib; +	mii_bus->write = b44_mdio_write_phylib; +	mii_bus->name = "b44_eth_mii"; +	mii_bus->parent = sdev->dev; +	mii_bus->phy_mask = ~(1 << bp->phy_addr); +	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance); +	mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); +	if (!mii_bus->irq) { +		dev_err(sdev->dev, "mii_bus irq allocation failed\n"); +		err = -ENOMEM; +		goto err_out_mdiobus; +	} + +	memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR); + +	bp->mii_bus = mii_bus; + +	err = mdiobus_register(mii_bus); +	if (err) { +		dev_err(sdev->dev, "failed to register MII bus\n"); +		goto err_out_mdiobus_irq; +	} + +	if (!bp->mii_bus->phy_map[bp->phy_addr] && +	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) { + +		dev_info(sdev->dev, +			 "could not find PHY at %i, use fixed one\n", +			 bp->phy_addr); + +		bp->phy_addr = 0; +		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0", +			 bp->phy_addr); +	} else { +		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, +			 bp->phy_addr); +	} + +	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link, +			     PHY_INTERFACE_MODE_MII); +	if (IS_ERR(phydev)) { +		dev_err(sdev->dev, "could not attach PHY at %i\n", +			bp->phy_addr); +		err = PTR_ERR(phydev); +		goto err_out_mdiobus_unregister; +	} + +	/* mask with MAC supported features */ +	phydev->supported &= (SUPPORTED_100baseT_Half | +			      SUPPORTED_100baseT_Full | +			      SUPPORTED_Autoneg | +			      SUPPORTED_MII); +	phydev->advertising = phydev->supported; + +	bp->phydev = phydev; +	bp->old_link = 0; +	bp->phy_addr = phydev->addr; + +	dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", +		 phydev->drv->name, dev_name(&phydev->dev)); + +	return 0; + +err_out_mdiobus_unregister: +	mdiobus_unregister(mii_bus); + +err_out_mdiobus_irq: +	kfree(mii_bus->irq); + +err_out_mdiobus: +	mdiobus_free(mii_bus); + +err_out: +	
return err; +} + +static void b44_unregister_phy_one(struct b44 *bp) +{ +	struct mii_bus *mii_bus = bp->mii_bus; + +	phy_disconnect(bp->phydev); +	mdiobus_unregister(mii_bus); +	kfree(mii_bus->irq); +	mdiobus_free(mii_bus); +} +  static int b44_init_one(struct ssb_device *sdev,  			const struct ssb_device_id *ent)  { @@ -2174,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,  	netif_napi_add(dev, &bp->napi, b44_poll, 64);  	dev->watchdog_timeo = B44_TX_TIMEOUT;  	dev->irq = sdev->irq; -	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); +	dev->ethtool_ops = &b44_ethtool_ops;  	err = ssb_bus_powerup(sdev->bus, 0);  	if (err) { @@ -2183,8 +2389,7 @@ static int b44_init_one(struct ssb_device *sdev,  		goto err_out_free_dev;  	} -	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || -	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) { +	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {  		dev_err(sdev->dev,  			"Required 30BIT DMA mask unsupported by the system\n");  		goto err_out_powerdown; @@ -2197,9 +2402,15 @@ static int b44_init_one(struct ssb_device *sdev,  		goto err_out_powerdown;  	} +	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) { +		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n"); +		err = -ENODEV; +		goto err_out_powerdown; +	} +  	bp->mii_if.dev = dev; -	bp->mii_if.mdio_read = b44_mii_read; -	bp->mii_if.mdio_write = b44_mii_write; +	bp->mii_if.mdio_read = b44_mdio_read_mii; +	bp->mii_if.mdio_write = b44_mdio_write_mii;  	bp->mii_if.phy_id = bp->phy_addr;  	bp->mii_if.phy_id_mask = 0x1f;  	bp->mii_if.reg_num_mask = 0x1f; @@ -2227,13 +2438,26 @@ static int b44_init_one(struct ssb_device *sdev,  	b44_chip_reset(bp, B44_CHIP_RESET_FULL);  	/* do a phy reset to test if there is an active phy */ -	if (b44_phy_reset(bp) < 0) -		bp->phy_addr = B44_PHY_ADDR_NO_PHY; +	err = b44_phy_reset(bp); +	if (err < 0) { +		dev_err(sdev->dev, "phy reset failed\n"); +		goto err_out_unregister_netdev; +	} + +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) { +		err = b44_register_phy_one(bp); +		if (err) { +			dev_err(sdev->dev, "Cannot register PHY, aborting\n"); +			goto err_out_unregister_netdev; +		} +	}  	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);  	return 0; +err_out_unregister_netdev: +	unregister_netdev(dev);  err_out_powerdown:  	ssb_bus_may_powerdown(sdev->bus); @@ -2247,8 +2471,11 @@ out:  static void b44_remove_one(struct ssb_device *sdev)  {  	struct net_device *dev = ssb_get_drvdata(sdev); +	struct b44 *bp = netdev_priv(dev);  	unregister_netdev(dev); +	if (bp->flags & B44_FLAG_EXTERNAL_PHY) +		b44_unregister_phy_one(bp);  	ssb_device_disable(sdev, 0);  	ssb_bus_may_powerdown(sdev->bus);  	free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h index 8993d72f042..3e9c3fc7591 100644 --- a/drivers/net/ethernet/broadcom/b44.h +++ b/drivers/net/ethernet/broadcom/b44.h @@ -280,9 +280,10 @@ struct ring_info {  	dma_addr_t	mapping;  }; -#define B44_MCAST_TABLE_SIZE	32 -#define B44_PHY_ADDR_NO_PHY	30 -#define B44_MDC_RATIO		5000000 +#define B44_MCAST_TABLE_SIZE		32 +#define B44_PHY_ADDR_NO_LOCAL_PHY	30 /* no local phy regs */ +#define B44_PHY_ADDR_NO_PHY		31 /* no phy present at all */ +#define B44_MDC_RATIO			5000000  #define	B44_STAT_REG_DECLARE		\  	_B44(tx_good_octets)		\ @@ -344,6 +345,9 @@ B44_STAT_REG_DECLARE  	struct u64_stats_sync	syncp;  }; +#define	B44_BOARDFLAG_ROBO		0x0010  /* Board has robo switch */ +#define	B44_BOARDFLAG_ADM		0x0080  /* Board has ADMtek switch */ +  struct ssb_device;  
struct b44 { @@ -376,7 +380,7 @@ struct b44 {  #define B44_FLAG_ADV_10FULL	0x02000000  #define B44_FLAG_ADV_100HALF	0x04000000  #define B44_FLAG_ADV_100FULL	0x08000000 -#define B44_FLAG_INTERNAL_PHY	0x10000000 +#define B44_FLAG_EXTERNAL_PHY	0x10000000  #define B44_FLAG_RX_RING_HACK	0x20000000  #define B44_FLAG_TX_RING_HACK	0x40000000  #define B44_FLAG_WOL_ENABLE	0x80000000 @@ -396,6 +400,9 @@ struct b44 {  	u32			tx_pending;  	u8			phy_addr;  	u8			force_copybreak; +	struct phy_device	*phydev; +	struct mii_bus		*mii_bus; +	int			old_link;  	struct mii_if_info	mii_if;  }; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index b9a5fb6400d..3e8d1a88ed3 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {  }; -#define BCM_ENET_STATS_LEN	\ -	(sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats)) +#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)  static const u32 unused_mib_regs[] = {  	ETH_MIB_TX_ALL_OCTETS, @@ -1722,9 +1721,6 @@ static const struct net_device_ops bcm_enet_ops = {  	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,  	.ndo_do_ioctl		= bcm_enet_ioctl,  	.ndo_change_mtu		= bcm_enet_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER -	.ndo_poll_controller = bcm_enet_netpoll, -#endif  };  /* @@ -1901,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)  	dev->netdev_ops = &bcm_enet_ops;  	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); -	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); +	dev->ethtool_ops = &bcm_enet_ethtool_ops;  	SET_NETDEV_DEV(dev, &pdev->dev);  	ret = register_netdev(dev); @@ -2787,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)  	/* register netdevice */  	dev->netdev_ops = &bcm_enetsw_ops;  	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); -	SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops); +	dev->ethtool_ops = &bcm_enetsw_ethtool_ops;  	SET_NETDEV_DEV(dev, &pdev->dev);  	spin_lock_init(&priv->enetsw_mdio_lock); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c new file mode 100644 index 00000000000..5776e503e4c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -0,0 +1,1633 @@ +/* + * Broadcom BCM7xxx System Port Ethernet MAC driver + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <net/ip.h> +#include <net/ipv6.h> + +#include "bcmsysport.h" + +/* I/O accessors register helpers */ +#define BCM_SYSPORT_IO_MACRO(name, offset) \ +static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\ +{									\ +	u32 reg = __raw_readl(priv->base + offset + off);		\ +	return reg;							\ +}									\ +static inline void name##_writel(struct bcm_sysport_priv *priv,		\ +				  u32 val, u32 off)			\ +{									\ +	__raw_writel(val, priv->base + offset + off);			\ +}									\ + +BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET); +BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET); +BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET); +BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET); +BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET); +BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET); +BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET); +BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET); +BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET); +BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET); + +/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied + * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths. +  */ +#define BCM_SYSPORT_INTR_L2(which)	\ +static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \ +						u32 mask)		\ +{									\ +	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\ +	priv->irq##which##_mask &= ~(mask);				\ +}									\ +static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \ +						u32 mask)		\ +{									\ +	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\ +	priv->irq##which##_mask |= (mask);				\ +}									\ + +BCM_SYSPORT_INTR_L2(0) +BCM_SYSPORT_INTR_L2(1) + +/* Register accesses to GISB/RBUS registers are expensive (few hundred + * nanoseconds), so keep the check for 64-bits explicit here to save + * one register write per-packet on 32-bits platforms. 
+ */ +static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv, +				     void __iomem *d, +				     dma_addr_t addr) +{ +#ifdef CONFIG_PHYS_ADDR_T_64BIT +	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK, +			d + DESC_ADDR_HI_STATUS_LEN); +#endif +	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO); +} + +static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, +						struct dma_desc *desc, +						unsigned int port) +{ +	/* Ports are latched, so write upper address first */ +	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port)); +	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port)); +} + +/* Ethtool operations */ +static int bcm_sysport_set_settings(struct net_device *dev, +				    struct ethtool_cmd *cmd) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); + +	if (!netif_running(dev)) +		return -EINVAL; + +	return phy_ethtool_sset(priv->phydev, cmd); +} + +static int bcm_sysport_get_settings(struct net_device *dev, +					struct ethtool_cmd *cmd) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); + +	if (!netif_running(dev)) +		return -EINVAL; + +	return phy_ethtool_gset(priv->phydev, cmd); +} + +static int bcm_sysport_set_rx_csum(struct net_device *dev, +					netdev_features_t wanted) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	u32 reg; + +	priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM); +	reg = rxchk_readl(priv, RXCHK_CONTROL); +	if (priv->rx_csum_en) +		reg |= RXCHK_EN; +	else +		reg &= ~RXCHK_EN; + +	/* If UniMAC forwards CRC, we need to skip over it to get +	 * a valid CHK bit to be set in the per-packet status word +	 */ +	if (priv->rx_csum_en && priv->crc_fwd) +		reg |= RXCHK_SKIP_FCS; +	else +		reg &= ~RXCHK_SKIP_FCS; + +	rxchk_writel(priv, reg, RXCHK_CONTROL); + +	return 0; +} + +static int bcm_sysport_set_tx_csum(struct net_device *dev, +					netdev_features_t wanted) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	u32 reg; + +	/* Hardware transmit checksum requires us to enable the Transmit status +	 * block prepended to the packet contents +	 */ +	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); +	reg = tdma_readl(priv, TDMA_CONTROL); +	if (priv->tsb_en) +		reg |= TSB_EN; +	else +		reg &= ~TSB_EN; +	tdma_writel(priv, reg, TDMA_CONTROL); + +	return 0; +} + +static int bcm_sysport_set_features(struct net_device *dev, +					netdev_features_t features) +{ +	netdev_features_t changed = features ^ dev->features; +	netdev_features_t wanted = dev->wanted_features; +	int ret = 0; + +	if (changed & NETIF_F_RXCSUM) +		ret = bcm_sysport_set_rx_csum(dev, wanted); +	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) +		ret = bcm_sysport_set_tx_csum(dev, wanted); + +	return ret; +} + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { +	/* general stats */ +	STAT_NETDEV(rx_packets), +	STAT_NETDEV(tx_packets), +	STAT_NETDEV(rx_bytes), +	STAT_NETDEV(tx_bytes), +	STAT_NETDEV(rx_errors), +	STAT_NETDEV(tx_errors), +	STAT_NETDEV(rx_dropped), +	STAT_NETDEV(tx_dropped), +	STAT_NETDEV(multicast), +	/* UniMAC RSV counters */ +	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), +	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), +	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), +	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), +	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), +	
STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), +	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), +	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), +	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), +	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), +	STAT_MIB_RX("rx_pkts", mib.rx.pkt), +	STAT_MIB_RX("rx_bytes", mib.rx.bytes), +	STAT_MIB_RX("rx_multicast", mib.rx.mca), +	STAT_MIB_RX("rx_broadcast", mib.rx.bca), +	STAT_MIB_RX("rx_fcs", mib.rx.fcs), +	STAT_MIB_RX("rx_control", mib.rx.cf), +	STAT_MIB_RX("rx_pause", mib.rx.pf), +	STAT_MIB_RX("rx_unknown", mib.rx.uo), +	STAT_MIB_RX("rx_align", mib.rx.aln), +	STAT_MIB_RX("rx_outrange", mib.rx.flr), +	STAT_MIB_RX("rx_code", mib.rx.cde), +	STAT_MIB_RX("rx_carrier", mib.rx.fcr), +	STAT_MIB_RX("rx_oversize", mib.rx.ovr), +	STAT_MIB_RX("rx_jabber", mib.rx.jbr), +	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue), +	STAT_MIB_RX("rx_good_pkts", mib.rx.pok), +	STAT_MIB_RX("rx_unicast", mib.rx.uc), +	STAT_MIB_RX("rx_ppp", mib.rx.ppp), +	STAT_MIB_RX("rx_crc", mib.rx.rcrc), +	/* UniMAC TSV counters */ +	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), +	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), +	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), +	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), +	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), +	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), +	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), +	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), +	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), +	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), +	STAT_MIB_TX("tx_pkts", mib.tx.pkts), +	STAT_MIB_TX("tx_multicast", mib.tx.mca), +	STAT_MIB_TX("tx_broadcast", mib.tx.bca), +	STAT_MIB_TX("tx_pause", mib.tx.pf), +	STAT_MIB_TX("tx_control", mib.tx.cf), +	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), +	STAT_MIB_TX("tx_oversize", mib.tx.ovr), +	STAT_MIB_TX("tx_defer", mib.tx.drf), +	STAT_MIB_TX("tx_excess_defer", mib.tx.edf), +	STAT_MIB_TX("tx_single_col", mib.tx.scl), +	STAT_MIB_TX("tx_multi_col", mib.tx.mcl), +	STAT_MIB_TX("tx_late_col", mib.tx.lcl), +	STAT_MIB_TX("tx_excess_col", mib.tx.ecl), +	STAT_MIB_TX("tx_frags", mib.tx.frg), +	STAT_MIB_TX("tx_total_col", mib.tx.ncl), +	STAT_MIB_TX("tx_jabber", mib.tx.jbr), +	STAT_MIB_TX("tx_bytes", mib.tx.bytes), +	STAT_MIB_TX("tx_good_pkts", mib.tx.pok), +	STAT_MIB_TX("tx_unicast", mib.tx.uc), +	/* UniMAC RUNT counters */ +	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt), +	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), +	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), +	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes), +	/* RXCHK misc statistics */ +	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR), +	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc, +			RXCHK_OTHER_DISC_CNTR), +	/* RBUF misc statistics */ +	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), +	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), +}; + +#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats) + +static void bcm_sysport_get_drvinfo(struct net_device *dev, +					struct ethtool_drvinfo *info) +{ +	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); +	strlcpy(info->version, "0.1", sizeof(info->version)); +	strlcpy(info->bus_info, "platform", sizeof(info->bus_info)); +	info->n_stats = BCM_SYSPORT_STATS_LEN; +} + +static u32 bcm_sysport_get_msglvl(struct net_device *dev) +{ +	struct 
bcm_sysport_priv *priv = netdev_priv(dev); + +	return priv->msg_enable; +} + +static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); + +	priv->msg_enable = enable; +} + +static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set) +{ +	switch (string_set) { +	case ETH_SS_STATS: +		return BCM_SYSPORT_STATS_LEN; +	default: +		return -EOPNOTSUPP; +	} +} + +static void bcm_sysport_get_strings(struct net_device *dev, +					u32 stringset, u8 *data) +{ +	int i; + +	switch (stringset) { +	case ETH_SS_STATS: +		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { +			memcpy(data + i * ETH_GSTRING_LEN, +				bcm_sysport_gstrings_stats[i].stat_string, +				ETH_GSTRING_LEN); +		} +		break; +	default: +		break; +	} +} + +static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) +{ +	int i, j = 0; + +	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) { +		const struct bcm_sysport_stats *s; +		u8 offset = 0; +		u32 val = 0; +		char *p; + +		s = &bcm_sysport_gstrings_stats[i]; +		switch (s->type) { +		case BCM_SYSPORT_STAT_NETDEV: +			continue; +		case BCM_SYSPORT_STAT_MIB_RX: +		case BCM_SYSPORT_STAT_MIB_TX: +		case BCM_SYSPORT_STAT_RUNT: +			if (s->type != BCM_SYSPORT_STAT_MIB_RX) +				offset = UMAC_MIB_STAT_OFFSET; +			val = umac_readl(priv, UMAC_MIB_START + j + offset); +			break; +		case BCM_SYSPORT_STAT_RXCHK: +			val = rxchk_readl(priv, s->reg_offset); +			if (val == ~0) +				rxchk_writel(priv, 0, s->reg_offset); +			break; +		case BCM_SYSPORT_STAT_RBUF: +			val = rbuf_readl(priv, s->reg_offset); +			if (val == ~0) +				rbuf_writel(priv, 0, s->reg_offset); +			break; +		} + +		j += s->stat_sizeof; +		p = (char *)priv + s->stat_offset; +		*(u32 *)p = val; +	} + +	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); +} + +static void bcm_sysport_get_stats(struct net_device *dev, +					struct ethtool_stats *stats, u64 *data) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	int i; + +	if (netif_running(dev)) +		bcm_sysport_update_mib_counters(priv); + +	for (i =  0; i < BCM_SYSPORT_STATS_LEN; i++) { +		const struct bcm_sysport_stats *s; +		char *p; + +		s = &bcm_sysport_gstrings_stats[i]; +		if (s->type == BCM_SYSPORT_STAT_NETDEV) +			p = (char *)&dev->stats; +		else +			p = (char *)priv; +		p += s->stat_offset; +		data[i] = *(u32 *)p; +	} +} + +static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) +{ +	dev_kfree_skb_any(cb->skb); +	cb->skb = NULL; +	dma_unmap_addr_set(cb, dma_addr, 0); +} + +static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, +				 struct bcm_sysport_cb *cb) +{ +	struct device *kdev = &priv->pdev->dev; +	struct net_device *ndev = priv->netdev; +	dma_addr_t mapping; +	int ret; + +	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); +	if (!cb->skb) { +		netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); +		return -ENOMEM; +	} + +	mapping = dma_map_single(kdev, cb->skb->data, +				RX_BUF_LENGTH, DMA_FROM_DEVICE); +	ret = dma_mapping_error(kdev, mapping); +	if (ret) { +		bcm_sysport_free_cb(cb); +		netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); +		return ret; +	} + +	dma_unmap_addr_set(cb, dma_addr, mapping); +	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); + +	priv->rx_bd_assign_index++; +	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); +	priv->rx_bd_assign_ptr = priv->rx_bds + +		(priv->rx_bd_assign_index * DESC_SIZE); + +	netif_dbg(priv, rx_status, ndev, "RX refill\n"); + +	return 0; +} + +static int bcm_sysport_alloc_rx_bufs(struct 
bcm_sysport_priv *priv) +{ +	struct bcm_sysport_cb *cb; +	int ret = 0; +	unsigned int i; + +	for (i = 0; i < priv->num_rx_bds; i++) { +		cb = &priv->rx_cbs[priv->rx_bd_assign_index]; +		if (cb->skb) +			continue; + +		ret = bcm_sysport_rx_refill(priv, cb); +		if (ret) +			break; +	} + +	return ret; +} + +/* Poll the hardware for up to budget packets to process */ +static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, +					unsigned int budget) +{ +	struct device *kdev = &priv->pdev->dev; +	struct net_device *ndev = priv->netdev; +	unsigned int processed = 0, to_process; +	struct bcm_sysport_cb *cb; +	struct sk_buff *skb; +	unsigned int p_index; +	u16 len, status; +	struct bcm_rsb *rsb; + +	/* Determine how much we should process since last call */ +	p_index = rdma_readl(priv, RDMA_PROD_INDEX); +	p_index &= RDMA_PROD_INDEX_MASK; + +	if (p_index < priv->rx_c_index) +		to_process = (RDMA_CONS_INDEX_MASK + 1) - +			priv->rx_c_index + p_index; +	else +		to_process = p_index - priv->rx_c_index; + +	netif_dbg(priv, rx_status, ndev, +			"p_index=%d rx_c_index=%d to_process=%d\n", +			p_index, priv->rx_c_index, to_process); + +	while ((processed < to_process) && +		(processed < budget)) { + +		cb = &priv->rx_cbs[priv->rx_read_ptr]; +		skb = cb->skb; +		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), +				RX_BUF_LENGTH, DMA_FROM_DEVICE); + +		/* Extract the Receive Status Block prepended */ +		rsb = (struct bcm_rsb *)skb->data; +		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; +		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & +			DESC_STATUS_MASK; + +		processed++; +		priv->rx_read_ptr++; +		if (priv->rx_read_ptr == priv->num_rx_bds) +			priv->rx_read_ptr = 0; + +		netif_dbg(priv, rx_status, ndev, +				"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", +				p_index, priv->rx_c_index, priv->rx_read_ptr, +				len, status); + +		if (unlikely(!skb)) { +			netif_err(priv, rx_err, ndev, "out of memory!\n"); +			ndev->stats.rx_dropped++; +			ndev->stats.rx_errors++; +			goto refill; +		} + +		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { +			netif_err(priv, rx_status, ndev, "fragmented packet!\n"); +			ndev->stats.rx_dropped++; +			ndev->stats.rx_errors++; +			bcm_sysport_free_cb(cb); +			goto refill; +		} + +		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { +			netif_err(priv, rx_err, ndev, "error packet\n"); +			if (status & RX_STATUS_OVFLOW) +				ndev->stats.rx_over_errors++; +			ndev->stats.rx_dropped++; +			ndev->stats.rx_errors++; +			bcm_sysport_free_cb(cb); +			goto refill; +		} + +		skb_put(skb, len); + +		/* Hardware validated our checksum */ +		if (likely(status & DESC_L4_CSUM)) +			skb->ip_summed = CHECKSUM_UNNECESSARY; + +		/* Hardware pre-pends packets with 2bytes before Ethernet +		 * header plus we have the Receive Status Block, strip off all +		 * of this from the SKB. 
+		 */ +		skb_pull(skb, sizeof(*rsb) + 2); +		len -= (sizeof(*rsb) + 2); + +		/* UniMAC may forward CRC */ +		if (priv->crc_fwd) { +			skb_trim(skb, len - ETH_FCS_LEN); +			len -= ETH_FCS_LEN; +		} + +		skb->protocol = eth_type_trans(skb, ndev); +		ndev->stats.rx_packets++; +		ndev->stats.rx_bytes += len; + +		napi_gro_receive(&priv->napi, skb); +refill: +		bcm_sysport_rx_refill(priv, cb); +	} + +	return processed; +} + +static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv, +					struct bcm_sysport_cb *cb, +					unsigned int *bytes_compl, +					unsigned int *pkts_compl) +{ +	struct device *kdev = &priv->pdev->dev; +	struct net_device *ndev = priv->netdev; + +	if (cb->skb) { +		ndev->stats.tx_bytes += cb->skb->len; +		*bytes_compl += cb->skb->len; +		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), +				dma_unmap_len(cb, dma_len), +				DMA_TO_DEVICE); +		ndev->stats.tx_packets++; +		(*pkts_compl)++; +		bcm_sysport_free_cb(cb); +	/* SKB fragment */ +	} else if (dma_unmap_addr(cb, dma_addr)) { +		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len); +		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), +				dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); +		dma_unmap_addr_set(cb, dma_addr, 0); +	} +} + +/* Reclaim queued SKBs for transmission completion, lockless version */ +static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, +					     struct bcm_sysport_tx_ring *ring) +{ +	struct net_device *ndev = priv->netdev; +	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; +	unsigned int pkts_compl = 0, bytes_compl = 0; +	struct bcm_sysport_cb *cb; +	struct netdev_queue *txq; +	u32 hw_ind; + +	txq = netdev_get_tx_queue(ndev, ring->index); + +	/* Compute how many descriptors have been processed since last call */ +	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); +	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; +	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); + +	last_c_index = ring->c_index; +	num_tx_cbs = ring->size; + +	c_index &= (num_tx_cbs - 1); + +	if (c_index >= last_c_index) +		last_tx_cn = c_index - last_c_index; +	else +		last_tx_cn = num_tx_cbs - last_c_index + c_index; + +	netif_dbg(priv, tx_done, ndev, +			"ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", +			ring->index, c_index, last_tx_cn, last_c_index); + +	while (last_tx_cn-- > 0) { +		cb = ring->cbs + last_c_index; +		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl); + +		ring->desc_count++; +		last_c_index++; +		last_c_index &= (num_tx_cbs - 1); +	} + +	ring->c_index = c_index; + +	if (netif_tx_queue_stopped(txq) && pkts_compl) +		netif_tx_wake_queue(txq); + +	netif_dbg(priv, tx_done, ndev, +			"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", +			ring->index, ring->c_index, pkts_compl, bytes_compl); + +	return pkts_compl; +} + +/* Locked version of the per-ring TX reclaim routine */ +static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, +					   struct bcm_sysport_tx_ring *ring) +{ +	unsigned int released; +	unsigned long flags; + +	spin_lock_irqsave(&ring->lock, flags); +	released = __bcm_sysport_tx_reclaim(priv, ring); +	spin_unlock_irqrestore(&ring->lock, flags); + +	return released; +} + +static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) +{ +	struct bcm_sysport_tx_ring *ring = +		container_of(napi, struct bcm_sysport_tx_ring, napi); +	unsigned int work_done = 0; + +	work_done = bcm_sysport_tx_reclaim(ring->priv, ring); + +	if (work_done == 0) { +		napi_complete(napi); +		
/* re-enable TX interrupt */ +		intrl2_1_mask_clear(ring->priv, BIT(ring->index)); +	} + +	return 0; +} + +static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) +{ +	unsigned int q; + +	for (q = 0; q < priv->netdev->num_tx_queues; q++) +		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]); +} + +static int bcm_sysport_poll(struct napi_struct *napi, int budget) +{ +	struct bcm_sysport_priv *priv = +		container_of(napi, struct bcm_sysport_priv, napi); +	unsigned int work_done = 0; + +	work_done = bcm_sysport_desc_rx(priv, budget); + +	priv->rx_c_index += work_done; +	priv->rx_c_index &= RDMA_CONS_INDEX_MASK; +	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); + +	if (work_done < budget) { +		napi_complete(napi); +		/* re-enable RX interrupts */ +		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); +	} + +	return work_done; +} + + +/* RX and misc interrupt routine */ +static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) +{ +	struct net_device *dev = dev_id; +	struct bcm_sysport_priv *priv = netdev_priv(dev); + +	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & +			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); +	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + +	if (unlikely(priv->irq0_stat == 0)) { +		netdev_warn(priv->netdev, "spurious RX interrupt\n"); +		return IRQ_NONE; +	} + +	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { +		if (likely(napi_schedule_prep(&priv->napi))) { +			/* disable RX interrupts */ +			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); +			__napi_schedule(&priv->napi); +		} +	} + +	/* TX ring is full, perform a full reclaim since we do not know +	 * which one would trigger this interrupt +	 */ +	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) +		bcm_sysport_tx_reclaim_all(priv); + +	return IRQ_HANDLED; +} + +/* TX interrupt service routine */ +static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) +{ +	struct net_device *dev = dev_id; +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	struct bcm_sysport_tx_ring *txr; +	unsigned int ring; + +	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & +				~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); +	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + +	if (unlikely(priv->irq1_stat == 0)) { +		netdev_warn(priv->netdev, "spurious TX interrupt\n"); +		return IRQ_NONE; +	} + +	for (ring = 0; ring < dev->num_tx_queues; ring++) { +		if (!(priv->irq1_stat & BIT(ring))) +			continue; + +		txr = &priv->tx_rings[ring]; + +		if (likely(napi_schedule_prep(&txr->napi))) { +			intrl2_1_mask_set(priv, BIT(ring)); +			__napi_schedule(&txr->napi); +		} +	} + +	return IRQ_HANDLED; +} + +static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev) +{ +	struct sk_buff *nskb; +	struct bcm_tsb *tsb; +	u32 csum_info; +	u8 ip_proto; +	u16 csum_start; +	u16 ip_ver; + +	/* Re-allocate SKB if needed */ +	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { +		nskb = skb_realloc_headroom(skb, sizeof(*tsb)); +		dev_kfree_skb(skb); +		if (!nskb) { +			dev->stats.tx_errors++; +			dev->stats.tx_dropped++; +			return -ENOMEM; +		} +		skb = nskb; +	} + +	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb)); +	/* Zero-out TSB by default */ +	memset(tsb, 0, sizeof(*tsb)); + +	if (skb->ip_summed == CHECKSUM_PARTIAL) { +		ip_ver = htons(skb->protocol); +		switch (ip_ver) { +		case ETH_P_IP: +			ip_proto = ip_hdr(skb)->protocol; +			break; +		case ETH_P_IPV6: +			ip_proto = ipv6_hdr(skb)->nexthdr; +			break; +		default: +			return 0; +		} + +		/* Get the checksum offset and the L4 
(transport) offset */ +		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); +		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; +		csum_info |= (csum_start << L4_PTR_SHIFT); + +		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { +			csum_info |= L4_LENGTH_VALID; +			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) +				csum_info |= L4_UDP; +		} else +			csum_info = 0; + +		tsb->l4_ptr_dest_map = csum_info; +	} + +	return 0; +} + +static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, +				    struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	struct device *kdev = &priv->pdev->dev; +	struct bcm_sysport_tx_ring *ring; +	struct bcm_sysport_cb *cb; +	struct netdev_queue *txq; +	struct dma_desc *desc; +	unsigned int skb_len; +	unsigned long flags; +	dma_addr_t mapping; +	u32 len_status; +	u16 queue; +	int ret; + +	queue = skb_get_queue_mapping(skb); +	txq = netdev_get_tx_queue(dev, queue); +	ring = &priv->tx_rings[queue]; + +	/* lock against tx reclaim in BH context and TX ring full interrupt */ +	spin_lock_irqsave(&ring->lock, flags); +	if (unlikely(ring->desc_count == 0)) { +		netif_tx_stop_queue(txq); +		netdev_err(dev, "queue %d awake and ring full!\n", queue); +		ret = NETDEV_TX_BUSY; +		goto out; +	} + +	/* Insert TSB and checksum infos */ +	if (priv->tsb_en) { +		ret = bcm_sysport_insert_tsb(skb, dev); +		if (ret) { +			ret = NETDEV_TX_OK; +			goto out; +		} +	} + +	/* The Ethernet switch we are interfaced with needs packets to be at +	 * least 64 bytes (including FCS) otherwise they will be discarded when +	 * they enter the switch port logic. When Broadcom tags are enabled, we +	 * need to make sure that packets are at least 68 bytes +	 * (including FCS and tag) because the length verification is done after +	 * the Broadcom tag is stripped off the ingress packet. +	 */ +	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { +		ret = NETDEV_TX_OK; +		goto out; +	} + +	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ? 
+			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len; + +	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); +	if (dma_mapping_error(kdev, mapping)) { +		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n", +				skb->data, skb_len); +		ret = NETDEV_TX_OK; +		goto out; +	} + +	/* Remember the SKB for future freeing */ +	cb = &ring->cbs[ring->curr_desc]; +	cb->skb = skb; +	dma_unmap_addr_set(cb, dma_addr, mapping); +	dma_unmap_len_set(cb, dma_len, skb_len); + +	/* Fetch a descriptor entry from our pool */ +	desc = ring->desc_cpu; + +	desc->addr_lo = lower_32_bits(mapping); +	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; +	len_status |= (skb_len << DESC_LEN_SHIFT); +	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) << +			DESC_STATUS_SHIFT; +	if (skb->ip_summed == CHECKSUM_PARTIAL) +		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); + +	ring->curr_desc++; +	if (ring->curr_desc == ring->size) +		ring->curr_desc = 0; +	ring->desc_count--; + +	/* Ensure write completion of the descriptor status/length +	 * in DRAM before the System Port WRITE_PORT register latches +	 * the value +	 */ +	wmb(); +	desc->addr_status_len = len_status; +	wmb(); + +	/* Write this descriptor address to the RING write port */ +	tdma_port_write_desc_addr(priv, desc, ring->index); + +	/* Check ring space and update SW control flow */ +	if (ring->desc_count == 0) +		netif_tx_stop_queue(txq); + +	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n", +			ring->index, ring->desc_count, ring->curr_desc); + +	ret = NETDEV_TX_OK; +out: +	spin_unlock_irqrestore(&ring->lock, flags); +	return ret; +} + +static void bcm_sysport_tx_timeout(struct net_device *dev) +{ +	netdev_warn(dev, "transmit timeout!\n"); + +	dev->trans_start = jiffies; +	dev->stats.tx_errors++; + +	netif_tx_wake_all_queues(dev); +} + +/* phylib adjust link callback */ +static void bcm_sysport_adj_link(struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	struct phy_device *phydev = priv->phydev; +	unsigned int changed = 0; +	u32 cmd_bits = 0, reg; + +	if (priv->old_link != phydev->link) { +		changed = 1; +		priv->old_link = phydev->link; +	} + +	if (priv->old_duplex != phydev->duplex) { +		changed = 1; +		priv->old_duplex = phydev->duplex; +	} + +	switch (phydev->speed) { +	case SPEED_2500: +		cmd_bits = CMD_SPEED_2500; +		break; +	case SPEED_1000: +		cmd_bits = CMD_SPEED_1000; +		break; +	case SPEED_100: +		cmd_bits = CMD_SPEED_100; +		break; +	case SPEED_10: +		cmd_bits = CMD_SPEED_10; +		break; +	default: +		break; +	} +	cmd_bits <<= CMD_SPEED_SHIFT; + +	if (phydev->duplex == DUPLEX_HALF) +		cmd_bits |= CMD_HD_EN; + +	if (priv->old_pause != phydev->pause) { +		changed = 1; +		priv->old_pause = phydev->pause; +	} + +	if (!phydev->pause) +		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + +	if (changed) { +		reg = umac_readl(priv, UMAC_CMD); +		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | +			CMD_HD_EN | CMD_RX_PAUSE_IGNORE | +			CMD_TX_PAUSE_IGNORE); +		reg |= cmd_bits; +		umac_writel(priv, reg, UMAC_CMD); + +		phy_print_status(priv->phydev); +	} +} + +static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, +				    unsigned int index) +{ +	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; +	struct device *kdev = &priv->pdev->dev; +	size_t size; +	void *p; +	u32 reg; + +	/* Simple descriptors partitioning for now */ +	size = 256; + +	/* We just need one DMA descriptor which is DMA-able, since writing to +	 * the port will allocate a new descriptor 
in its internal linked-list +	 */ +	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL); +	if (!p) { +		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); +		return -ENOMEM; +	} + +	ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL); +	if (!ring->cbs) { +		netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); +		return -ENOMEM; +	} + +	/* Initialize SW view of the ring */ +	spin_lock_init(&ring->lock); +	ring->priv = priv; +	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); +	ring->index = index; +	ring->size = size; +	ring->alloc_size = ring->size; +	ring->desc_cpu = p; +	ring->desc_count = ring->size; +	ring->curr_desc = 0; + +	/* Initialize HW ring */ +	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index)); +	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index)); +	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index)); +	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index)); +	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index)); +	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); + +	/* Program the number of descriptors as MAX_THRESHOLD and half of +	 * its size for the hysteresis trigger +	 */ +	tdma_writel(priv, ring->size | +			1 << RING_HYST_THRESH_SHIFT, +			TDMA_DESC_RING_MAX_HYST(index)); + +	/* Enable the ring queue in the arbiter */ +	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN); +	reg |= (1 << index); +	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN); + +	napi_enable(&ring->napi); + +	netif_dbg(priv, hw, priv->netdev, +			"TDMA cfg, size=%d, desc_cpu=%p\n", +			ring->size, ring->desc_cpu); + +	return 0; +} + +static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv, +					unsigned int index) +{ +	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index]; +	struct device *kdev = &priv->pdev->dev; +	u32 reg; + +	/* Caller should stop the TDMA engine */ +	reg = tdma_readl(priv, TDMA_STATUS); +	if (!(reg & TDMA_DISABLED)) +		netdev_warn(priv->netdev, "TDMA not stopped!\n"); + +	napi_disable(&ring->napi); +	netif_napi_del(&ring->napi); + +	bcm_sysport_tx_reclaim(priv, ring); + +	kfree(ring->cbs); +	ring->cbs = NULL; + +	if (ring->desc_dma) { +		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma); +		ring->desc_dma = 0; +	} +	ring->size = 0; +	ring->alloc_size = 0; + +	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n"); +} + +/* RDMA helper */ +static inline int rdma_enable_set(struct bcm_sysport_priv *priv, +					unsigned int enable) +{ +	unsigned int timeout = 1000; +	u32 reg; + +	reg = rdma_readl(priv, RDMA_CONTROL); +	if (enable) +		reg |= RDMA_EN; +	else +		reg &= ~RDMA_EN; +	rdma_writel(priv, reg, RDMA_CONTROL); + +	/* Poll for RMDA disabling completion */ +	do { +		reg = rdma_readl(priv, RDMA_STATUS); +		if (!!(reg & RDMA_DISABLED) == !enable) +			return 0; +		usleep_range(1000, 2000); +	} while (timeout-- > 0); + +	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n"); + +	return -ETIMEDOUT; +} + +/* TDMA helper */ +static inline int tdma_enable_set(struct bcm_sysport_priv *priv, +					unsigned int enable) +{ +	unsigned int timeout = 1000; +	u32 reg; + +	reg = tdma_readl(priv, TDMA_CONTROL); +	if (enable) +		reg |= TDMA_EN; +	else +		reg &= ~TDMA_EN; +	tdma_writel(priv, reg, TDMA_CONTROL); + +	/* Poll for TMDA disabling completion */ +	do { +		reg = tdma_readl(priv, TDMA_STATUS); +		if (!!(reg & TDMA_DISABLED) == !enable) +			return 0; + +		usleep_range(1000, 2000); +	} while (timeout-- > 0); + +	netdev_err(priv->netdev, "timeout waiting 
for TDMA to finish\n"); + +	return -ETIMEDOUT; +} + +static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) +{ +	u32 reg; +	int ret; + +	/* Initialize SW view of the RX ring */ +	priv->num_rx_bds = NUM_RX_DESC; +	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; +	priv->rx_bd_assign_ptr = priv->rx_bds; +	priv->rx_bd_assign_index = 0; +	priv->rx_c_index = 0; +	priv->rx_read_ptr = 0; +	priv->rx_cbs = kzalloc(priv->num_rx_bds * +				sizeof(struct bcm_sysport_cb), GFP_KERNEL); +	if (!priv->rx_cbs) { +		netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); +		return -ENOMEM; +	} + +	ret = bcm_sysport_alloc_rx_bufs(priv); +	if (ret) { +		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); +		return ret; +	} + +	/* Initialize HW, ensure RDMA is disabled */ +	reg = rdma_readl(priv, RDMA_STATUS); +	if (!(reg & RDMA_DISABLED)) +		rdma_enable_set(priv, 0); + +	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO); +	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI); +	rdma_writel(priv, 0, RDMA_PROD_INDEX); +	rdma_writel(priv, 0, RDMA_CONS_INDEX); +	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT | +			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE); +	/* Operate the queue in ring mode */ +	rdma_writel(priv, 0, RDMA_START_ADDR_HI); +	rdma_writel(priv, 0, RDMA_START_ADDR_LO); +	rdma_writel(priv, 0, RDMA_END_ADDR_HI); +	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO); + +	rdma_writel(priv, 1, RDMA_MBDONE_INTR); + +	netif_dbg(priv, hw, priv->netdev, +			"RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", +			priv->num_rx_bds, priv->rx_bds); + +	return 0; +} + +static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv) +{ +	struct bcm_sysport_cb *cb; +	unsigned int i; +	u32 reg; + +	/* Caller should ensure RDMA is disabled */ +	reg = rdma_readl(priv, RDMA_STATUS); +	if (!(reg & RDMA_DISABLED)) +		netdev_warn(priv->netdev, "RDMA not stopped!\n"); + +	for (i = 0; i < priv->num_rx_bds; i++) { +		cb = &priv->rx_cbs[i]; +		if (dma_unmap_addr(cb, dma_addr)) +			dma_unmap_single(&priv->pdev->dev, +					dma_unmap_addr(cb, dma_addr), +					RX_BUF_LENGTH, DMA_FROM_DEVICE); +		bcm_sysport_free_cb(cb); +	} + +	kfree(priv->rx_cbs); +	priv->rx_cbs = NULL; + +	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n"); +} + +static void bcm_sysport_set_rx_mode(struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	u32 reg; + +	reg = umac_readl(priv, UMAC_CMD); +	if (dev->flags & IFF_PROMISC) +		reg |= CMD_PROMISC; +	else +		reg &= ~CMD_PROMISC; +	umac_writel(priv, reg, UMAC_CMD); + +	/* No support for ALLMULTI */ +	if (dev->flags & IFF_ALLMULTI) +		return; +} + +static inline void umac_enable_set(struct bcm_sysport_priv *priv, +					unsigned int enable) +{ +	u32 reg; + +	reg = umac_readl(priv, UMAC_CMD); +	if (enable) +		reg |= CMD_RX_EN | CMD_TX_EN; +	else +		reg &= ~(CMD_RX_EN | CMD_TX_EN); +	umac_writel(priv, reg, UMAC_CMD); + +	/* UniMAC stops on a packet boundary, wait for a full-sized packet +	 * to be processed (1 msec). 
+	 */ +	if (enable == 0) +		usleep_range(1000, 2000); +} + +static inline void umac_reset(struct bcm_sysport_priv *priv) +{ +	u32 reg; + +	reg = umac_readl(priv, UMAC_CMD); +	reg |= CMD_SW_RESET; +	umac_writel(priv, reg, UMAC_CMD); +	udelay(10); +	reg = umac_readl(priv, UMAC_CMD); +	reg &= ~CMD_SW_RESET; +	umac_writel(priv, reg, UMAC_CMD); +} + +static void umac_set_hw_addr(struct bcm_sysport_priv *priv, +				unsigned char *addr) +{ +	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | +			(addr[2] << 8) | addr[3], UMAC_MAC0); +	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); +} + +static void topctrl_flush(struct bcm_sysport_priv *priv) +{ +	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL); +	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL); +	mdelay(1); +	topctrl_writel(priv, 0, RX_FLUSH_CNTL); +	topctrl_writel(priv, 0, TX_FLUSH_CNTL); +} + +static int bcm_sysport_open(struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	unsigned int i; +	u32 reg; +	int ret; + +	/* Reset UniMAC */ +	umac_reset(priv); + +	/* Flush TX and RX FIFOs at TOPCTRL level */ +	topctrl_flush(priv); + +	/* Disable the UniMAC RX/TX */ +	umac_enable_set(priv, 0); + +	/* Enable RBUF 2bytes alignment and Receive Status Block */ +	reg = rbuf_readl(priv, RBUF_CONTROL); +	reg |= RBUF_4B_ALGN | RBUF_RSB_EN; +	rbuf_writel(priv, reg, RBUF_CONTROL); + +	/* Set maximum frame length */ +	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + +	/* Set MAC address */ +	umac_set_hw_addr(priv, dev->dev_addr); + +	/* Read CRC forward */ +	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); + +	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, +					0, priv->phy_interface); +	if (!priv->phydev) { +		netdev_err(dev, "could not attach to PHY\n"); +		return -ENODEV; +	} + +	/* Reset house keeping link status */ +	priv->old_duplex = -1; +	priv->old_link = -1; +	priv->old_pause = -1; + +	/* mask all interrupts and request them */ +	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); +	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); +	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); +	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); +	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); +	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + +	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev); +	if (ret) { +		netdev_err(dev, "failed to request RX interrupt\n"); +		goto out_phy_disconnect; +	} + +	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev); +	if (ret) { +		netdev_err(dev, "failed to request TX interrupt\n"); +		goto out_free_irq0; +	} + +	/* Initialize both hardware and software ring */ +	for (i = 0; i < dev->num_tx_queues; i++) { +		ret = bcm_sysport_init_tx_ring(priv, i); +		if (ret) { +			netdev_err(dev, "failed to initialize TX ring %d\n", +					i); +			goto out_free_tx_ring; +		} +	} + +	/* Initialize linked-list */ +	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS); + +	/* Initialize RX ring */ +	ret = bcm_sysport_init_rx_ring(priv); +	if (ret) { +		netdev_err(dev, "failed to initialize RX ring\n"); +		goto out_free_rx_ring; +	} + +	/* Turn on RDMA */ +	ret = rdma_enable_set(priv, 1); +	if (ret) +		goto out_free_rx_ring; + +	/* Enable RX interrupt and TX ring full interrupt */ +	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); + +	/* Turn on TDMA */ +	ret = tdma_enable_set(priv, 1); +	if (ret) +		goto out_clear_rx_int; + +	/* Enable NAPI */ +	napi_enable(&priv->napi); + +	/* Turn on 
UniMAC TX/RX */ +	umac_enable_set(priv, 1); + +	phy_start(priv->phydev); + +	/* Enable TX interrupts for the 32 TXQs */ +	intrl2_1_mask_clear(priv, 0xffffffff); + +	/* Last call before we start the real business */ +	netif_tx_start_all_queues(dev); + +	return 0; + +out_clear_rx_int: +	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); +out_free_rx_ring: +	bcm_sysport_fini_rx_ring(priv); +out_free_tx_ring: +	for (i = 0; i < dev->num_tx_queues; i++) +		bcm_sysport_fini_tx_ring(priv, i); +	free_irq(priv->irq1, dev); +out_free_irq0: +	free_irq(priv->irq0, dev); +out_phy_disconnect: +	phy_disconnect(priv->phydev); +	return ret; +} + +static int bcm_sysport_stop(struct net_device *dev) +{ +	struct bcm_sysport_priv *priv = netdev_priv(dev); +	unsigned int i; +	u32 reg; +	int ret; + +	/* stop all software from updating hardware */ +	netif_tx_stop_all_queues(dev); +	napi_disable(&priv->napi); +	phy_stop(priv->phydev); + +	/* mask all interrupts */ +	intrl2_0_mask_set(priv, 0xffffffff); +	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); +	intrl2_1_mask_set(priv, 0xffffffff); +	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + +	/* Disable UniMAC RX */ +	reg = umac_readl(priv, UMAC_CMD); +	reg &= ~CMD_RX_EN; +	umac_writel(priv, reg, UMAC_CMD); + +	ret = tdma_enable_set(priv, 0); +	if (ret) { +		netdev_err(dev, "timeout disabling TDMA\n"); +		return ret; +	} + +	/* Wait for a maximum packet size to be drained */ +	usleep_range(2000, 3000); + +	ret = rdma_enable_set(priv, 0); +	if (ret) { +		netdev_err(dev, "timeout disabling RDMA\n"); +		return ret; +	} + +	/* Disable UniMAC TX */ +	reg = umac_readl(priv, UMAC_CMD); +	reg &= ~CMD_TX_EN; +	umac_writel(priv, reg, UMAC_CMD); + +	/* Free RX/TX rings SW structures */ +	for (i = 0; i < dev->num_tx_queues; i++) +		bcm_sysport_fini_tx_ring(priv, i); +	bcm_sysport_fini_rx_ring(priv); + +	free_irq(priv->irq0, dev); +	free_irq(priv->irq1, dev); + +	/* Disconnect from PHY */ +	phy_disconnect(priv->phydev); + +	return 0; +} + +static struct ethtool_ops bcm_sysport_ethtool_ops = { +	.get_settings		= bcm_sysport_get_settings, +	.set_settings		= bcm_sysport_set_settings, +	.get_drvinfo		= bcm_sysport_get_drvinfo, +	.get_msglevel		= bcm_sysport_get_msglvl, +	.set_msglevel		= bcm_sysport_set_msglvl, +	.get_link		= ethtool_op_get_link, +	.get_strings		= bcm_sysport_get_strings, +	.get_ethtool_stats	= bcm_sysport_get_stats, +	.get_sset_count		= bcm_sysport_get_sset_count, +}; + +static const struct net_device_ops bcm_sysport_netdev_ops = { +	.ndo_start_xmit		= bcm_sysport_xmit, +	.ndo_tx_timeout		= bcm_sysport_tx_timeout, +	.ndo_open		= bcm_sysport_open, +	.ndo_stop		= bcm_sysport_stop, +	.ndo_set_features	= bcm_sysport_set_features, +	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode, +}; + +#define REV_FMT	"v%2x.%02x" + +static int bcm_sysport_probe(struct platform_device *pdev) +{ +	struct bcm_sysport_priv *priv; +	struct device_node *dn; +	struct net_device *dev; +	const void *macaddr; +	struct resource *r; +	u32 txq, rxq; +	int ret; + +	dn = pdev->dev.of_node; +	r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +	/* Read the Transmit/Receive Queue properties */ +	if (of_property_read_u32(dn, "systemport,num-txq", &txq)) +		txq = TDMA_NUM_RINGS; +	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) +		rxq = 1; + +	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); +	if (!dev) +		return -ENOMEM; + +	/* Initialize private members */ +	priv = netdev_priv(dev); + +	priv->irq0 = platform_get_irq(pdev, 0); +	priv->irq1 = 
platform_get_irq(pdev, 1); +	if (priv->irq0 <= 0 || priv->irq1 <= 0) { +		dev_err(&pdev->dev, "invalid interrupts\n"); +		ret = -EINVAL; +		goto err; +	} + +	priv->base = devm_ioremap_resource(&pdev->dev, r); +	if (IS_ERR(priv->base)) { +		ret = PTR_ERR(priv->base); +		goto err; +	} + +	priv->netdev = dev; +	priv->pdev = pdev; + +	priv->phy_interface = of_get_phy_mode(dn); +	/* Default to GMII interface mode */ +	if (priv->phy_interface < 0) +		priv->phy_interface = PHY_INTERFACE_MODE_GMII; + +	/* In the case of a fixed PHY, the DT node associated +	 * to the PHY is the Ethernet MAC DT node. +	 */ +	if (of_phy_is_fixed_link(dn)) { +		ret = of_phy_register_fixed_link(dn); +		if (ret) { +			dev_err(&pdev->dev, "failed to register fixed PHY\n"); +			goto err; +		} + +		priv->phy_dn = dn; +	} + +	/* Initialize netdevice members */ +	macaddr = of_get_mac_address(dn); +	if (!macaddr || !is_valid_ether_addr(macaddr)) { +		dev_warn(&pdev->dev, "using random Ethernet MAC\n"); +		random_ether_addr(dev->dev_addr); +	} else { +		ether_addr_copy(dev->dev_addr, macaddr); +	} + +	SET_NETDEV_DEV(dev, &pdev->dev); +	dev_set_drvdata(&pdev->dev, dev); +	dev->ethtool_ops = &bcm_sysport_ethtool_ops; +	dev->netdev_ops = &bcm_sysport_netdev_ops; +	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); + +	/* HW supported features, none enabled by default */ +	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | +				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + +	/* Set the needed headroom once and for all */ +	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); +	dev->needed_headroom += sizeof(struct bcm_tsb); + +	/* libphy will adjust the link state accordingly */ +	netif_carrier_off(dev); + +	ret = register_netdev(dev); +	if (ret) { +		dev_err(&pdev->dev, "failed to register net_device\n"); +		goto err; +	} + +	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; +	dev_info(&pdev->dev, +		"Broadcom SYSTEMPORT" REV_FMT +		" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n", +		(priv->rev >> 8) & 0xff, priv->rev & 0xff, +		priv->base, priv->irq0, priv->irq1, txq, rxq); + +	return 0; +err: +	free_netdev(dev); +	return ret; +} + +static int bcm_sysport_remove(struct platform_device *pdev) +{ +	struct net_device *dev = dev_get_drvdata(&pdev->dev); + +	/* Not much to do, ndo_close has been called +	 * and we use managed allocations +	 */ +	unregister_netdev(dev); +	free_netdev(dev); +	dev_set_drvdata(&pdev->dev, NULL); + +	return 0; +} + +static const struct of_device_id bcm_sysport_of_match[] = { +	{ .compatible = "brcm,systemport-v1.00" }, +	{ .compatible = "brcm,systemport" }, +	{ /* sentinel */ } +}; + +static struct platform_driver bcm_sysport_driver = { +	.probe	= bcm_sysport_probe, +	.remove	= bcm_sysport_remove, +	.driver =  { +		.name = "brcm-systemport", +		.owner = THIS_MODULE, +		.of_match_table = bcm_sysport_of_match, +	}, +}; +module_platform_driver(bcm_sysport_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver"); +MODULE_ALIAS("platform:brcm-systemport"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h new file mode 100644 index 00000000000..281c0824603 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -0,0 +1,678 @@ +/* + * Broadcom BCM7xxx System Port Ethernet MAC driver + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as 
+ * published by the Free Software Foundation. + */ + +#ifndef __BCM_SYSPORT_H +#define __BCM_SYSPORT_H + +#include <linux/if_vlan.h> + +/* Receive/transmit descriptor format */ +#define DESC_ADDR_HI_STATUS_LEN	0x00 +#define  DESC_ADDR_HI_SHIFT	0 +#define  DESC_ADDR_HI_MASK	0xff +#define  DESC_STATUS_SHIFT	8 +#define  DESC_STATUS_MASK	0x3ff +#define  DESC_LEN_SHIFT		18 +#define  DESC_LEN_MASK		0x7fff +#define DESC_ADDR_LO		0x04 + +/* HW supports 40-bit addressing hence the */ +#define DESC_SIZE		(WORDS_PER_DESC * sizeof(u32)) + +/* Default RX buffer allocation size */ +#define RX_BUF_LENGTH		2048 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN	4 +#define ENET_PAD		10 +#define UMAC_MAX_MTU_SIZE	(ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ +				 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) + +/* Transmit status block */ +struct bcm_tsb { +	u32 pcp_dei_vid; +#define PCP_DEI_MASK		0xf +#define VID_SHIFT		4 +#define VID_MASK		0xfff +	u32 l4_ptr_dest_map; +#define L4_CSUM_PTR_MASK	0x1ff +#define L4_PTR_SHIFT		9 +#define L4_PTR_MASK		0x1ff +#define L4_UDP			(1 << 18) +#define L4_LENGTH_VALID		(1 << 19) +#define DEST_MAP_SHIFT		20 +#define DEST_MAP_MASK		0x1ff +}; + +/* Receive status block uses the same + * definitions as the DMA descriptor + */ +struct bcm_rsb { +	u32 rx_status_len; +	u32 brcm_egress_tag; +}; + +/* Common Receive/Transmit status bits */ +#define DESC_L4_CSUM		(1 << 7) +#define DESC_SOP		(1 << 8) +#define DESC_EOP		(1 << 9) + +/* Receive Status bits */ +#define RX_STATUS_UCAST			0 +#define RX_STATUS_BCAST			0x04 +#define RX_STATUS_MCAST			0x08 +#define RX_STATUS_L2_MCAST		0x0c +#define RX_STATUS_ERR			(1 << 4) +#define RX_STATUS_OVFLOW		(1 << 5) +#define RX_STATUS_PARSE_FAIL		(1 << 6) + +/* Transmit Status bits */ +#define TX_STATUS_VLAN_NO_ACT		0x00 +#define TX_STATUS_VLAN_PCP_TSB		0x01 +#define TX_STATUS_VLAN_QUEUE		0x02 +#define TX_STATUS_VLAN_VID_TSB		0x03 +#define TX_STATUS_OWR_CRC		(1 << 2) +#define TX_STATUS_APP_CRC		(1 << 3) +#define TX_STATUS_BRCM_TAG_NO_ACT	0 +#define TX_STATUS_BRCM_TAG_ZERO		0x10 +#define TX_STATUS_BRCM_TAG_ONE_QUEUE	0x20 +#define TX_STATUS_BRCM_TAG_ONE_TSB	0x30 +#define TX_STATUS_SKIP_BYTES		(1 << 6) + +/* Specific register definitions */ +#define SYS_PORT_TOPCTRL_OFFSET		0 +#define REV_CNTL			0x00 +#define  REV_MASK			0xffff + +#define RX_FLUSH_CNTL			0x04 +#define  RX_FLUSH			(1 << 0) + +#define TX_FLUSH_CNTL			0x08 +#define  TX_FLUSH			(1 << 0) + +#define MISC_CNTL			0x0c +#define  SYS_CLK_SEL			(1 << 0) +#define  TDMA_EOP_SEL			(1 << 1) + +/* Level-2 Interrupt controller offsets and defines */ +#define SYS_PORT_INTRL2_0_OFFSET	0x200 +#define SYS_PORT_INTRL2_1_OFFSET	0x240 +#define INTRL2_CPU_STATUS		0x00 +#define INTRL2_CPU_SET			0x04 +#define INTRL2_CPU_CLEAR		0x08 +#define INTRL2_CPU_MASK_STATUS		0x0c +#define INTRL2_CPU_MASK_SET		0x10 +#define INTRL2_CPU_MASK_CLEAR		0x14 + +/* Level-2 instance 0 interrupt bits */ +#define INTRL2_0_GISB_ERR		(1 << 0) +#define INTRL2_0_RBUF_OVFLOW		(1 << 1) +#define INTRL2_0_TBUF_UNDFLOW		(1 << 2) +#define INTRL2_0_MPD			(1 << 3) +#define INTRL2_0_BRCM_MATCH_TAG		(1 << 4) +#define INTRL2_0_RDMA_MBDONE		(1 << 5) +#define INTRL2_0_OVER_MAX_THRESH	(1 << 6) +#define INTRL2_0_BELOW_HYST_THRESH	(1 << 7) +#define INTRL2_0_FREE_LIST_EMPTY	(1 << 8) +#define INTRL2_0_TX_RING_FULL		(1 << 9) +#define INTRL2_0_DESC_ALLOC_ERR		(1 << 10) +#define INTRL2_0_UNEXP_PKTSIZE_ACK	(1 << 11) + +/* RXCHK offset and defines */ +#define 
SYS_PORT_RXCHK_OFFSET		0x300 + +#define RXCHK_CONTROL			0x00 +#define  RXCHK_EN			(1 << 0) +#define  RXCHK_SKIP_FCS			(1 << 1) +#define  RXCHK_BAD_CSUM_DIS		(1 << 2) +#define  RXCHK_BRCM_TAG_EN		(1 << 3) +#define  RXCHK_BRCM_TAG_MATCH_SHIFT	4 +#define  RXCHK_BRCM_TAG_MATCH_MASK	0xff +#define  RXCHK_PARSE_TNL		(1 << 12) +#define  RXCHK_VIOL_EN			(1 << 13) +#define  RXCHK_VIOL_DIS			(1 << 14) +#define  RXCHK_INCOM_PKT		(1 << 15) +#define  RXCHK_V6_DUPEXT_EN		(1 << 16) +#define  RXCHK_V6_DUPEXT_DIS		(1 << 17) +#define  RXCHK_ETHERTYPE_DIS		(1 << 18) +#define  RXCHK_L2_HDR_DIS		(1 << 19) +#define  RXCHK_L3_HDR_DIS		(1 << 20) +#define  RXCHK_MAC_RX_ERR_DIS		(1 << 21) +#define  RXCHK_PARSE_AUTH		(1 << 22) + +#define RXCHK_BRCM_TAG0			0x04 +#define RXCHK_BRCM_TAG(i)		((i) * RXCHK_BRCM_TAG0) +#define RXCHK_BRCM_TAG0_MASK		0x24 +#define RXCHK_BRCM_TAG_MASK(i)		((i) * RXCHK_BRCM_TAG0_MASK) +#define RXCHK_BRCM_TAG_MATCH_STATUS	0x44 +#define RXCHK_ETHERTYPE			0x48 +#define RXCHK_BAD_CSUM_CNTR		0x4C +#define RXCHK_OTHER_DISC_CNTR		0x50 + +/* TXCHCK offsets and defines */ +#define SYS_PORT_TXCHK_OFFSET		0x380 +#define TXCHK_PKT_RDY_THRESH		0x00 + +/* Receive buffer offset and defines */ +#define SYS_PORT_RBUF_OFFSET		0x400 + +#define RBUF_CONTROL			0x00 +#define  RBUF_RSB_EN			(1 << 0) +#define  RBUF_4B_ALGN			(1 << 1) +#define  RBUF_BRCM_TAG_STRIP		(1 << 2) +#define  RBUF_BAD_PKT_DISC		(1 << 3) +#define  RBUF_RESUME_THRESH_SHIFT	4 +#define  RBUF_RESUME_THRESH_MASK	0xff +#define  RBUF_OK_TO_SEND_SHIFT		12 +#define  RBUF_OK_TO_SEND_MASK		0xff +#define  RBUF_CRC_REPLACE		(1 << 20) +#define  RBUF_OK_TO_SEND_MODE		(1 << 21) +#define  RBUF_RSB_SWAP			(1 << 22) +#define  RBUF_ACPI_EN			(1 << 23) + +#define RBUF_PKT_RDY_THRESH		0x04 + +#define RBUF_STATUS			0x08 +#define  RBUF_WOL_MODE			(1 << 0) +#define  RBUF_MPD			(1 << 1) +#define  RBUF_ACPI			(1 << 2) + +#define RBUF_OVFL_DISC_CNTR		0x0c +#define RBUF_ERR_PKT_CNTR		0x10 + +/* Transmit buffer offset and defines */ +#define SYS_PORT_TBUF_OFFSET		0x600 + +#define TBUF_CONTROL			0x00 +#define  TBUF_BP_EN			(1 << 0) +#define  TBUF_MAX_PKT_THRESH_SHIFT	1 +#define  TBUF_MAX_PKT_THRESH_MASK	0x1f +#define  TBUF_FULL_THRESH_SHIFT		8 +#define  TBUF_FULL_THRESH_MASK		0x1f + +/* UniMAC offset and defines */ +#define SYS_PORT_UMAC_OFFSET		0x800 + +#define UMAC_CMD			0x008 +#define  CMD_TX_EN			(1 << 0) +#define  CMD_RX_EN			(1 << 1) +#define  CMD_SPEED_SHIFT		2 +#define  CMD_SPEED_10			0 +#define  CMD_SPEED_100			1 +#define  CMD_SPEED_1000			2 +#define  CMD_SPEED_2500			3 +#define  CMD_SPEED_MASK			3 +#define  CMD_PROMISC			(1 << 4) +#define  CMD_PAD_EN			(1 << 5) +#define  CMD_CRC_FWD			(1 << 6) +#define  CMD_PAUSE_FWD			(1 << 7) +#define  CMD_RX_PAUSE_IGNORE		(1 << 8) +#define  CMD_TX_ADDR_INS		(1 << 9) +#define  CMD_HD_EN			(1 << 10) +#define  CMD_SW_RESET			(1 << 13) +#define  CMD_LCL_LOOP_EN		(1 << 15) +#define  CMD_AUTO_CONFIG		(1 << 22) +#define  CMD_CNTL_FRM_EN		(1 << 23) +#define  CMD_NO_LEN_CHK			(1 << 24) +#define  CMD_RMT_LOOP_EN		(1 << 25) +#define  CMD_PRBL_EN			(1 << 27) +#define  CMD_TX_PAUSE_IGNORE		(1 << 28) +#define  CMD_TX_RX_EN			(1 << 29) +#define  CMD_RUNT_FILTER_DIS		(1 << 30) + +#define UMAC_MAC0			0x00c +#define UMAC_MAC1			0x010 +#define UMAC_MAX_FRAME_LEN		0x014 + +#define UMAC_TX_FLUSH			0x334 + +#define UMAC_MIB_START			0x400 + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define UMAC_MIB_STAT_OFFSET		0xc + +#define UMAC_MIB_CTRL			
0x580 +#define  MIB_RX_CNT_RST			(1 << 0) +#define  MIB_RUNT_CNT_RST		(1 << 1) +#define  MIB_TX_CNT_RST			(1 << 2) +#define UMAC_MDF_CTRL			0x650 +#define UMAC_MDF_ADDR			0x654 + +/* Receive DMA offset and defines */ +#define SYS_PORT_RDMA_OFFSET		0x2000 + +#define RDMA_CONTROL			0x1000 +#define  RDMA_EN			(1 << 0) +#define  RDMA_RING_CFG			(1 << 1) +#define  RDMA_DISC_EN			(1 << 2) +#define  RDMA_BUF_DATA_OFFSET_SHIFT	4 +#define  RDMA_BUF_DATA_OFFSET_MASK	0x3ff + +#define RDMA_STATUS			0x1004 +#define  RDMA_DISABLED			(1 << 0) +#define  RDMA_DESC_RAM_INIT_BUSY	(1 << 1) +#define  RDMA_BP_STATUS			(1 << 2) + +#define RDMA_SCB_BURST_SIZE		0x1008 + +#define RDMA_RING_BUF_SIZE		0x100c +#define  RDMA_RING_SIZE_SHIFT		16 + +#define RDMA_WRITE_PTR_HI		0x1010 +#define RDMA_WRITE_PTR_LO		0x1014 +#define RDMA_PROD_INDEX			0x1018 +#define  RDMA_PROD_INDEX_MASK		0xffff + +#define RDMA_CONS_INDEX			0x101c +#define  RDMA_CONS_INDEX_MASK		0xffff + +#define RDMA_START_ADDR_HI		0x1020 +#define RDMA_START_ADDR_LO		0x1024 +#define RDMA_END_ADDR_HI		0x1028 +#define RDMA_END_ADDR_LO		0x102c + +#define RDMA_MBDONE_INTR		0x1030 +#define  RDMA_INTR_THRESH_MASK		0xff +#define  RDMA_TIMEOUT_SHIFT		16 +#define  RDMA_TIMEOUT_MASK		0xffff + +#define RDMA_XON_XOFF_THRESH		0x1034 +#define  RDMA_XON_XOFF_THRESH_MASK	0xffff +#define  RDMA_XOFF_THRESH_SHIFT		16 + +#define RDMA_READ_PTR_HI		0x1038 +#define RDMA_READ_PTR_LO		0x103c + +#define RDMA_OVERRIDE			0x1040 +#define  RDMA_LE_MODE			(1 << 0) +#define  RDMA_REG_MODE			(1 << 1) + +#define RDMA_TEST			0x1044 +#define  RDMA_TP_OUT_SEL		(1 << 0) +#define  RDMA_MEM_SEL			(1 << 1) + +#define RDMA_DEBUG			0x1048 + +/* Transmit DMA offset and defines */ +#define TDMA_NUM_RINGS			32	/* rings = queues */ +#define TDMA_PORT_SIZE			DESC_SIZE /* two 32-bits words */ + +#define SYS_PORT_TDMA_OFFSET		0x4000 +#define TDMA_WRITE_PORT_OFFSET		0x0000 +#define TDMA_WRITE_PORT_HI(i)		(TDMA_WRITE_PORT_OFFSET + \ +					(i) * TDMA_PORT_SIZE) +#define TDMA_WRITE_PORT_LO(i)		(TDMA_WRITE_PORT_OFFSET + \ +					sizeof(u32) + (i) * TDMA_PORT_SIZE) + +#define TDMA_READ_PORT_OFFSET		(TDMA_WRITE_PORT_OFFSET + \ +					(TDMA_NUM_RINGS * TDMA_PORT_SIZE)) +#define TDMA_READ_PORT_HI(i)		(TDMA_READ_PORT_OFFSET + \ +					(i) * TDMA_PORT_SIZE) +#define TDMA_READ_PORT_LO(i)		(TDMA_READ_PORT_OFFSET + \ +					sizeof(u32) + (i) * TDMA_PORT_SIZE) + +#define TDMA_READ_PORT_CMD_OFFSET	(TDMA_READ_PORT_OFFSET + \ +					(TDMA_NUM_RINGS * TDMA_PORT_SIZE)) +#define TDMA_READ_PORT_CMD(i)		(TDMA_READ_PORT_CMD_OFFSET + \ +					(i) * sizeof(u32)) + +#define TDMA_DESC_RING_00_BASE		(TDMA_READ_PORT_CMD_OFFSET + \ +					(TDMA_NUM_RINGS * sizeof(u32))) + +/* Register offsets and defines relatives to a specific ring number */ +#define RING_HEAD_TAIL_PTR		0x00 +#define  RING_HEAD_MASK			0x7ff +#define  RING_TAIL_SHIFT		11 +#define  RING_TAIL_MASK			0x7ff +#define  RING_FLUSH			(1 << 24) +#define  RING_EN			(1 << 25) + +#define RING_COUNT			0x04 +#define  RING_COUNT_MASK		0x7ff +#define  RING_BUFF_DONE_SHIFT		11 +#define  RING_BUFF_DONE_MASK		0x7ff + +#define RING_MAX_HYST			0x08 +#define  RING_MAX_THRESH_MASK		0x7ff +#define  RING_HYST_THRESH_SHIFT		11 +#define  RING_HYST_THRESH_MASK		0x7ff + +#define RING_INTR_CONTROL		0x0c +#define  RING_INTR_THRESH_MASK		0x7ff +#define  RING_EMPTY_INTR_EN		(1 << 15) +#define  RING_TIMEOUT_SHIFT		16 +#define  RING_TIMEOUT_MASK		0xffff + +#define RING_PROD_CONS_INDEX		0x10 +#define  RING_PROD_INDEX_MASK		0xffff +#define  RING_CONS_INDEX_SHIFT		16 +#define  RING_CONS_INDEX_MASK		0xffff + +#define 
RING_MAPPING			0x14 +#define  RING_QID_MASK			0x3 +#define  RING_PORT_ID_SHIFT		3 +#define  RING_PORT_ID_MASK		0x7 +#define  RING_IGNORE_STATUS		(1 << 6) +#define  RING_FAILOVER_EN		(1 << 7) +#define  RING_CREDIT_SHIFT		8 +#define  RING_CREDIT_MASK		0xffff + +#define RING_PCP_DEI_VID		0x18 +#define  RING_VID_MASK			0x7ff +#define  RING_DEI			(1 << 12) +#define  RING_PCP_SHIFT			13 +#define  RING_PCP_MASK			0x7 +#define  RING_PKT_SIZE_ADJ_SHIFT	16 +#define  RING_PKT_SIZE_ADJ_MASK		0xf + +#define TDMA_DESC_RING_SIZE		28 + +/* Defininition for a given TX ring base address */ +#define TDMA_DESC_RING_BASE(i)		(TDMA_DESC_RING_00_BASE + \ +					((i) * TDMA_DESC_RING_SIZE)) + +/* Ring indexed register addreses */ +#define TDMA_DESC_RING_HEAD_TAIL_PTR(i)	(TDMA_DESC_RING_BASE(i) + \ +					RING_HEAD_TAIL_PTR) +#define TDMA_DESC_RING_COUNT(i)		(TDMA_DESC_RING_BASE(i) + \ +					RING_COUNT) +#define TDMA_DESC_RING_MAX_HYST(i)	(TDMA_DESC_RING_BASE(i) + \ +					RING_MAX_HYST) +#define TDMA_DESC_RING_INTR_CONTROL(i)	(TDMA_DESC_RING_BASE(i) + \ +					RING_INTR_CONTROL) +#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \ +					(TDMA_DESC_RING_BASE(i) + \ +					RING_PROD_CONS_INDEX) +#define TDMA_DESC_RING_MAPPING(i)	(TDMA_DESC_RING_BASE(i) + \ +					RING_MAPPING) +#define TDMA_DESC_RING_PCP_DEI_VID(i)	(TDMA_DESC_RING_BASE(i) + \ +					RING_PCP_DEI_VID) + +#define TDMA_CONTROL			0x600 +#define  TDMA_EN			(1 << 0) +#define  TSB_EN				(1 << 1) +#define  TSB_SWAP			(1 << 2) +#define  ACB_ALGO			(1 << 3) +#define  BUF_DATA_OFFSET_SHIFT		4 +#define  BUF_DATA_OFFSET_MASK		0x3ff +#define  VLAN_EN			(1 << 14) +#define  SW_BRCM_TAG			(1 << 15) +#define  WNC_KPT_SIZE_UPDATE		(1 << 16) +#define  SYNC_PKT_SIZE			(1 << 17) +#define  ACH_TXDONE_DELAY_SHIFT		18 +#define  ACH_TXDONE_DELAY_MASK		0xff + +#define TDMA_STATUS			0x604 +#define  TDMA_DISABLED			(1 << 0) +#define  TDMA_LL_RAM_INIT_BUSY		(1 << 1) + +#define TDMA_SCB_BURST_SIZE		0x608 +#define TDMA_OVER_MAX_THRESH_STATUS	0x60c +#define TDMA_OVER_HYST_THRESH_STATUS	0x610 +#define TDMA_TPID			0x614 + +#define TDMA_FREE_LIST_HEAD_TAIL_PTR	0x618 +#define  TDMA_FREE_HEAD_MASK		0x7ff +#define  TDMA_FREE_TAIL_SHIFT		11 +#define  TDMA_FREE_TAIL_MASK		0x7ff + +#define TDMA_FREE_LIST_COUNT		0x61c +#define  TDMA_FREE_LIST_COUNT_MASK	0x7ff + +#define TDMA_TIER2_ARB_CTRL		0x620 +#define  TDMA_ARB_MODE_RR		0 +#define  TDMA_ARB_MODE_WEIGHT_RR	0x1 +#define  TDMA_ARB_MODE_STRICT		0x2 +#define  TDMA_ARB_MODE_DEFICIT_RR	0x3 +#define  TDMA_CREDIT_SHIFT		4 +#define  TDMA_CREDIT_MASK		0xffff + +#define TDMA_TIER1_ARB_0_CTRL		0x624 +#define  TDMA_ARB_EN			(1 << 0) + +#define TDMA_TIER1_ARB_0_QUEUE_EN	0x628 +#define TDMA_TIER1_ARB_1_CTRL		0x62c +#define TDMA_TIER1_ARB_1_QUEUE_EN	0x630 +#define TDMA_TIER1_ARB_2_CTRL		0x634 +#define TDMA_TIER1_ARB_2_QUEUE_EN	0x638 +#define TDMA_TIER1_ARB_3_CTRL		0x63c +#define TDMA_TIER1_ARB_3_QUEUE_EN	0x640 + +#define TDMA_SCB_ENDIAN_OVERRIDE	0x644 +#define  TDMA_LE_MODE			(1 << 0) +#define  TDMA_REG_MODE			(1 << 1) + +#define TDMA_TEST			0x648 +#define  TDMA_TP_OUT_SEL		(1 << 0) +#define  TDMA_MEM_TM			(1 << 1) + +#define TDMA_DEBUG			0x64c + +/* Transmit/Receive descriptor */ +struct dma_desc { +	u32	addr_status_len; +	u32	addr_lo; +}; + +/* Number of Receive hardware descriptor words */ +#define NUM_HW_RX_DESC_WORDS		1024 +/* Real number of usable descriptors */ +#define NUM_RX_DESC			(NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC) + +/* Internal linked-list RAM has up to 1536 entries */ +#define NUM_TX_DESC			1536 + +#define WORDS_PER_DESC			(sizeof(struct dma_desc) / 
sizeof(u32)) + +/* Rx/Tx common counter group. */ +struct bcm_sysport_pkt_counters { +	u32	cnt_64;		/* RO Received/Transmitted 64 bytes packet */ +	u32	cnt_127;	/* RO Rx/Tx 127 bytes packet */ +	u32	cnt_255;	/* RO Rx/Tx 65-255 bytes packet */ +	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */ +	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */ +	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */ +	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */ +	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet*/ +	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet*/ +	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcm_sysport_rx_counters { +	struct  bcm_sysport_pkt_counters pkt_cnt; +	u32	pkt;		/* RO (0x428) Received pkt count*/ +	u32	bytes;		/* RO Received byte count */ +	u32	mca;		/* RO # of Received multicast pkt */ +	u32	bca;		/* RO # of Received broadcast pkt */ +	u32	fcs;		/* RO # of Received FCS error  */ +	u32	cf;		/* RO # of Received control frame pkt*/ +	u32	pf;		/* RO # of Received pause frame pkt */ +	u32	uo;		/* RO # of unknown op code pkt */ +	u32	aln;		/* RO # of alignment error count */ +	u32	flr;		/* RO # of frame length out of range count */ +	u32	cde;		/* RO # of code error pkt */ +	u32	fcr;		/* RO # of carrier sense error pkt */ +	u32	ovr;		/* RO # of oversize pkt*/ +	u32	jbr;		/* RO # of jabber count */ +	u32	mtue;		/* RO # of MTU error pkt*/ +	u32	pok;		/* RO # of Received good pkt */ +	u32	uc;		/* RO # of unicast pkt */ +	u32	ppp;		/* RO # of PPP pkt */ +	u32	rcrc;		/* RO (0x470), # of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcm_sysport_tx_counters { +	struct bcm_sysport_pkt_counters pkt_cnt; +	u32	pkts;		/* RO (0x4a8) Transmitted pkt */ +	u32	mca;		/* RO # of xmited multicast pkt */ +	u32	bca;		/* RO # of xmited broadcast pkt */ +	u32	pf;		/* RO # of xmited pause frame count */ +	u32	cf;		/* RO # of xmited control frame count */ +	u32	fcs;		/* RO # of xmited FCS error count */ +	u32	ovr;		/* RO # of xmited oversize pkt */ +	u32	drf;		/* RO # of xmited deferral pkt */ +	u32	edf;		/* RO # of xmited Excessive deferral pkt*/ +	u32	scl;		/* RO # of xmited single collision pkt */ +	u32	mcl;		/* RO # of xmited multiple collision pkt*/ +	u32	lcl;		/* RO # of xmited late collision pkt */ +	u32	ecl;		/* RO # of xmited excessive collision pkt*/ +	u32	frg;		/* RO # of xmited fragments pkt*/ +	u32	ncl;		/* RO # of xmited total collision count */ +	u32	jbr;		/* RO # of xmited jabber count*/ +	u32	bytes;		/* RO # of xmited byte count */ +	u32	pok;		/* RO # of xmited good pkt */ +	u32	uc;		/* RO (0x4f0) # of xmited unicast pkt */ +}; + +struct bcm_sysport_mib { +	struct bcm_sysport_rx_counters rx; +	struct bcm_sysport_tx_counters tx; +	u32 rx_runt_cnt; +	u32 rx_runt_fcs; +	u32 rx_runt_fcs_align; +	u32 rx_runt_bytes; +	u32 rxchk_bad_csum; +	u32 rxchk_other_pkt_disc; +	u32 rbuf_ovflow_cnt; +	u32 rbuf_err_cnt; +}; + +/* HW maintains a large list of counters */ +enum bcm_sysport_stat_type { +	BCM_SYSPORT_STAT_NETDEV = -1, +	BCM_SYSPORT_STAT_MIB_RX, +	BCM_SYSPORT_STAT_MIB_TX, +	BCM_SYSPORT_STAT_RUNT, +	BCM_SYSPORT_STAT_RXCHK, +	BCM_SYSPORT_STAT_RBUF, +}; + +/* Macros to help define ethtool statistics */ +#define STAT_NETDEV(m) { \ +	.stat_string = __stringify(m), \ +	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ +	.stat_offset = offsetof(struct net_device_stats, m), \ +	.type = BCM_SYSPORT_STAT_NETDEV, \ +} + +#define STAT_MIB(str, m, _type) { \ +	.stat_string = str, \ +	.stat_sizeof = sizeof(((struct 
bcm_sysport_priv *)0)->m), \ +	.stat_offset = offsetof(struct bcm_sysport_priv, m), \ +	.type = _type, \ +} + +#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) +#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) +#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) + +#define STAT_RXCHK(str, m, ofs) { \ +	.stat_string = str, \ +	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ +	.stat_offset = offsetof(struct bcm_sysport_priv, m), \ +	.type = BCM_SYSPORT_STAT_RXCHK, \ +	.reg_offset = ofs, \ +} + +#define STAT_RBUF(str, m, ofs) { \ +	.stat_string = str, \ +	.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ +	.stat_offset = offsetof(struct bcm_sysport_priv, m), \ +	.type = BCM_SYSPORT_STAT_RBUF, \ +	.reg_offset = ofs, \ +} + +struct bcm_sysport_stats { +	char stat_string[ETH_GSTRING_LEN]; +	int stat_sizeof; +	int stat_offset; +	enum bcm_sysport_stat_type type; +	/* reg offset from UMAC base for misc counters */ +	u16 reg_offset; +}; + +/* Software house keeping helper structure */ +struct bcm_sysport_cb { +	struct sk_buff	*skb;		/* SKB for RX packets */ +	void __iomem	*bd_addr;	/* Buffer descriptor PHYS addr */ + +	DEFINE_DMA_UNMAP_ADDR(dma_addr); +	DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* Software view of the TX ring */ +struct bcm_sysport_tx_ring { +	spinlock_t	lock;		/* Ring lock for tx reclaim/xmit */ +	struct napi_struct napi;	/* NAPI per tx queue */ +	dma_addr_t	desc_dma;	/* DMA cookie */ +	unsigned int	index;		/* Ring index */ +	unsigned int	size;		/* Ring current size */ +	unsigned int	alloc_size;	/* Ring one-time allocated size */ +	unsigned int	desc_count;	/* Number of descriptors */ +	unsigned int	curr_desc;	/* Current descriptor */ +	unsigned int	c_index;	/* Last consumer index */ +	unsigned int	p_index;	/* Current producer index */ +	struct bcm_sysport_cb *cbs;	/* Transmit control blocks */ +	struct dma_desc	*desc_cpu;	/* CPU view of the descriptor */ +	struct bcm_sysport_priv *priv;	/* private context backpointer */ +}; + +/* Driver private structure */ +struct bcm_sysport_priv { +	void __iomem		*base; +	u32			irq0_stat; +	u32			irq0_mask; +	u32			irq1_stat; +	u32			irq1_mask; +	struct napi_struct	napi ____cacheline_aligned; +	struct net_device	*netdev; +	struct platform_device	*pdev; +	int			irq0; +	int			irq1; + +	/* Transmit rings */ +	struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS]; + +	/* Receive queue */ +	void __iomem		*rx_bds; +	void __iomem		*rx_bd_assign_ptr; +	unsigned int		rx_bd_assign_index; +	struct bcm_sysport_cb	*rx_cbs; +	unsigned int		num_rx_bds; +	unsigned int		rx_read_ptr; +	unsigned int		rx_c_index; + +	/* PHY device */ +	struct device_node	*phy_dn; +	struct phy_device	*phydev; +	phy_interface_t		phy_interface; +	int			old_pause; +	int			old_link; +	int			old_duplex; + +	/* Misc fields */ +	unsigned int		rx_csum_en:1; +	unsigned int		tsb_en:1; +	unsigned int		crc_fwd:1; +	u16			rev; + +	/* MIB related fields */ +	struct bcm_sysport_mib	mib; + +	/* Ethtool */ +	u32			msg_enable; +}; +#endif /* __BCM_SYSPORT_H */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 249468f9536..05c6af6c418 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -96,6 +96,19 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,  	u32 ctl;  	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); +	if (bgmac->core->id.rev >= 4) { +		ctl &= ~BGMAC_DMA_TX_BL_MASK; +		ctl |= BGMAC_DMA_TX_BL_128 << 
BGMAC_DMA_TX_BL_SHIFT; + +		ctl &= ~BGMAC_DMA_TX_MR_MASK; +		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT; + +		ctl &= ~BGMAC_DMA_TX_PC_MASK; +		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT; + +		ctl &= ~BGMAC_DMA_TX_PT_MASK; +		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT; +	}  	ctl |= BGMAC_DMA_TX_ENABLE;  	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;  	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl); @@ -149,6 +162,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,  	dma_desc->ctl0 = cpu_to_le32(ctl0);  	dma_desc->ctl1 = cpu_to_le32(ctl1); +	netdev_sent_queue(net_dev, skb->len); +  	wmb();  	/* Increase ring->end to point empty slot. We tell hardware the first @@ -178,6 +193,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)  	struct device *dma_dev = bgmac->core->dma_dev;  	int empty_slot;  	bool freed = false; +	unsigned bytes_compl = 0, pkts_compl = 0;  	/* The last slot that hardware didn't consume yet */  	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); @@ -195,6 +211,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)  					 slot->skb->len, DMA_TO_DEVICE);  			slot->dma_addr = 0; +			bytes_compl += slot->skb->len; +			pkts_compl++; +  			/* Free memory! :) */  			dev_kfree_skb(slot->skb);  			slot->skb = NULL; @@ -208,6 +227,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)  		freed = true;  	} +	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl); +  	if (freed && netif_queue_stopped(bgmac->net_dev))  		netif_wake_queue(bgmac->net_dev);  } @@ -232,6 +253,16 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,  	u32 ctl;  	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); +	if (bgmac->core->id.rev >= 4) { +		ctl &= ~BGMAC_DMA_RX_BL_MASK; +		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; + +		ctl &= ~BGMAC_DMA_RX_PC_MASK; +		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT; + +		ctl &= ~BGMAC_DMA_RX_PT_MASK; +		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; +	}  	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;  	ctl |= BGMAC_DMA_RX_ENABLE;  	ctl |= BGMAC_DMA_RX_PARITY_DISABLE; @@ -244,31 +275,59 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,  				     struct bgmac_slot_info *slot)  {  	struct device *dma_dev = bgmac->core->dma_dev; +	struct sk_buff *skb; +	dma_addr_t dma_addr;  	struct bgmac_rx_header *rx;  	/* Alloc skb */ -	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); -	if (!slot->skb) +	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE); +	if (!skb)  		return -ENOMEM;  	/* Poison - if everything goes fine, hardware will overwrite it */ -	rx = (struct bgmac_rx_header *)slot->skb->data; +	rx = (struct bgmac_rx_header *)skb->data;  	rx->len = cpu_to_le16(0xdead);  	rx->flags = cpu_to_le16(0xbeef);  	/* Map skb for the DMA */ -	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data, -					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); -	if (dma_mapping_error(dma_dev, slot->dma_addr)) { +	dma_addr = dma_map_single(dma_dev, skb->data, +				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); +	if (dma_mapping_error(dma_dev, dma_addr)) {  		bgmac_err(bgmac, "DMA mapping error\n"); +		dev_kfree_skb(skb);  		return -ENOMEM;  	} + +	/* Update the slot */ +	slot->skb = skb; +	slot->dma_addr = dma_addr; +  	if (slot->dma_addr & 0xC0000000)  		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");  	return 0;  } +static void bgmac_dma_rx_setup_desc(struct bgmac 
*bgmac, +				    struct bgmac_dma_ring *ring, int desc_idx) +{ +	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx; +	u32 ctl0 = 0, ctl1 = 0; + +	if (desc_idx == ring->num_slots - 1) +		ctl0 |= BGMAC_DESC_CTL0_EOT; +	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; +	/* Is there any BGMAC device that requires extension? */ +	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & +	 * B43_DMA64_DCTL1_ADDREXT_MASK; +	 */ + +	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr)); +	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)); +	dma_desc->ctl0 = cpu_to_le32(ctl0); +	dma_desc->ctl1 = cpu_to_le32(ctl1); +} +  static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,  			     int weight)  { @@ -287,7 +346,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,  		struct device *dma_dev = bgmac->core->dma_dev;  		struct bgmac_slot_info *slot = &ring->slots[ring->start];  		struct sk_buff *skb = slot->skb; -		struct sk_buff *new_skb;  		struct bgmac_rx_header *rx;  		u16 len, flags; @@ -300,38 +358,51 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,  		len = le16_to_cpu(rx->len);  		flags = le16_to_cpu(rx->flags); -		/* Check for poison and drop or pass the packet */ -		if (len == 0xdead && flags == 0xbeef) { -			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", -				  ring->start); -		} else { +		do { +			dma_addr_t old_dma_addr = slot->dma_addr; +			int err; + +			/* Check for poison and drop or pass the packet */ +			if (len == 0xdead && flags == 0xbeef) { +				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", +					  ring->start); +				dma_sync_single_for_device(dma_dev, +							   slot->dma_addr, +							   BGMAC_RX_BUF_SIZE, +							   DMA_FROM_DEVICE); +				break; +			} +  			/* Omit CRC. 
*/  			len -= ETH_FCS_LEN; -			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len); -			if (new_skb) { -				skb_put(new_skb, len); -				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET, -								 new_skb->data, -								 len); -				skb_checksum_none_assert(skb); -				new_skb->protocol = -					eth_type_trans(new_skb, bgmac->net_dev); -				netif_receive_skb(new_skb); -				handled++; -			} else { -				bgmac->net_dev->stats.rx_dropped++; -				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n"); +			/* Prepare new skb as replacement */ +			err = bgmac_dma_rx_skb_for_slot(bgmac, slot); +			if (err) { +				/* Poison the old skb */ +				rx->len = cpu_to_le16(0xdead); +				rx->flags = cpu_to_le16(0xbeef); + +				dma_sync_single_for_device(dma_dev, +							   slot->dma_addr, +							   BGMAC_RX_BUF_SIZE, +							   DMA_FROM_DEVICE); +				break;  			} +			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start); -			/* Poison the old skb */ -			rx->len = cpu_to_le16(0xdead); -			rx->flags = cpu_to_le16(0xbeef); -		} +			/* Unmap old skb, we'll pass it to the netfif */ +			dma_unmap_single(dma_dev, old_dma_addr, +					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); -		/* Make it back accessible to the hardware */ -		dma_sync_single_for_device(dma_dev, slot->dma_addr, -					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); +			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len); +			skb_pull(skb, BGMAC_RX_FRAME_OFFSET); + +			skb_checksum_none_assert(skb); +			skb->protocol = eth_type_trans(skb, bgmac->net_dev); +			netif_receive_skb(skb); +			handled++; +		} while (0);  		if (++ring->start >= BGMAC_RX_RING_SLOTS)  			ring->start = 0; @@ -495,8 +566,6 @@ err_dma_free:  static void bgmac_dma_init(struct bgmac *bgmac)  {  	struct bgmac_dma_ring *ring; -	struct bgmac_dma_desc *dma_desc; -	u32 ctl0, ctl1;  	int i;  	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { @@ -529,23 +598,8 @@ static void bgmac_dma_init(struct bgmac *bgmac)  		if (ring->unaligned)  			bgmac_dma_rx_enable(bgmac, ring); -		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots; -		     j++, dma_desc++) { -			ctl0 = ctl1 = 0; - -			if (j == ring->num_slots - 1) -				ctl0 |= BGMAC_DESC_CTL0_EOT; -			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN; -			/* Is there any BGMAC device that requires extension? 
*/ -			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) & -			 * B43_DMA64_DCTL1_ADDREXT_MASK; -			 */ - -			dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr)); -			dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr)); -			dma_desc->ctl0 = cpu_to_le32(ctl0); -			dma_desc->ctl1 = cpu_to_le32(ctl1); -		} +		for (j = 0; j < ring->num_slots; j++) +			bgmac_dma_rx_setup_desc(bgmac, ring, j);  		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,  			    ring->index_base + @@ -651,70 +705,6 @@ static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)  	return 0;  } -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */ -static void bgmac_phy_force(struct bgmac *bgmac) -{ -	u16 ctl; -	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB | -		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX); - -	if (bgmac->phyaddr == BGMAC_PHY_NOREGS) -		return; - -	if (bgmac->autoneg) -		return; - -	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL); -	ctl &= mask; -	if (bgmac->full_duplex) -		ctl |= BGMAC_PHY_CTL_DUPLEX; -	if (bgmac->speed == BGMAC_SPEED_100) -		ctl |= BGMAC_PHY_CTL_SPEED_100; -	else if (bgmac->speed == BGMAC_SPEED_1000) -		ctl |= BGMAC_PHY_CTL_SPEED_1000; -	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl); -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */ -static void bgmac_phy_advertise(struct bgmac *bgmac) -{ -	u16 adv; - -	if (bgmac->phyaddr == BGMAC_PHY_NOREGS) -		return; - -	if (!bgmac->autoneg) -		return; - -	/* Adv selected 10/100 speeds */ -	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV); -	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL | -		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL); -	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10) -		adv |= BGMAC_PHY_ADV_10HALF; -	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100) -		adv |= BGMAC_PHY_ADV_100HALF; -	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10) -		adv |= BGMAC_PHY_ADV_10FULL; -	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100) -		adv |= BGMAC_PHY_ADV_100FULL; -	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv); - -	/* Adv selected 1000 speeds */ -	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2); -	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL); -	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000) -		adv |= BGMAC_PHY_ADV2_1000HALF; -	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000) -		adv |= BGMAC_PHY_ADV2_1000FULL; -	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv); - -	/* Restart */ -	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, -			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) | -			BGMAC_PHY_CTL_RESTART); -} -  /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */  static void bgmac_phy_init(struct bgmac *bgmac)  { @@ -758,11 +748,9 @@ static void bgmac_phy_reset(struct bgmac *bgmac)  	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)  		return; -	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, -			BGMAC_PHY_CTL_RESET); +	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);  	udelay(100); -	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) & -	    BGMAC_PHY_CTL_RESET) +	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)  		bgmac_err(bgmac, "PHY reset failed\n");  	bgmac_phy_init(bgmac);  } @@ -780,13 +768,13 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,  	u32 cmdcfg = bgmac_read(bgmac, 
BGMAC_CMDCFG);  	u32 new_val = (cmdcfg & mask) | set; -	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR); +	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));  	udelay(2);  	if (new_val != cmdcfg || force)  		bgmac_write(bgmac, BGMAC_CMDCFG, new_val); -	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR); +	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));  	udelay(2);  } @@ -845,31 +833,56 @@ static void bgmac_clear_mib(struct bgmac *bgmac)  }  /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */ -static void bgmac_speed(struct bgmac *bgmac, int speed) +static void bgmac_mac_speed(struct bgmac *bgmac)  {  	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);  	u32 set = 0; -	if (speed & BGMAC_SPEED_10) +	switch (bgmac->mac_speed) { +	case SPEED_10:  		set |= BGMAC_CMDCFG_ES_10; -	if (speed & BGMAC_SPEED_100) +		break; +	case SPEED_100:  		set |= BGMAC_CMDCFG_ES_100; -	if (speed & BGMAC_SPEED_1000) +		break; +	case SPEED_1000:  		set |= BGMAC_CMDCFG_ES_1000; -	if (!bgmac->full_duplex) +		break; +	case SPEED_2500: +		set |= BGMAC_CMDCFG_ES_2500; +		break; +	default: +		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed); +	} + +	if (bgmac->mac_duplex == DUPLEX_HALF)  		set |= BGMAC_CMDCFG_HD; +  	bgmac_cmdcfg_maskset(bgmac, mask, set, true);  }  static void bgmac_miiconfig(struct bgmac *bgmac)  { -	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> -			BGMAC_DS_MM_SHIFT; -	if (imode == 0 || imode == 1) { -		if (bgmac->autoneg) -			bgmac_speed(bgmac, BGMAC_SPEED_100); -		else -			bgmac_speed(bgmac, bgmac->speed); +	struct bcma_device *core = bgmac->core; +	struct bcma_chipinfo *ci = &core->bus->chipinfo; +	u8 imode; + +	if (ci->id == BCMA_CHIP_ID_BCM4707 || +	    ci->id == BCMA_CHIP_ID_BCM53018) { +		bcma_awrite32(core, BCMA_IOCTL, +			      bcma_aread32(core, BCMA_IOCTL) | 0x40 | +			      BGMAC_BCMA_IOCTL_SW_CLKEN); +		bgmac->mac_speed = SPEED_2500; +		bgmac->mac_duplex = DUPLEX_FULL; +		bgmac_mac_speed(bgmac); +	} else { +		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & +			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; +		if (imode == 0 || imode == 1) { +			bgmac->mac_speed = SPEED_100; +			bgmac->mac_duplex = DUPLEX_FULL; +			bgmac_mac_speed(bgmac); +		}  	}  } @@ -879,7 +892,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)  	struct bcma_device *core = bgmac->core;  	struct bcma_bus *bus = core->bus;  	struct bcma_chipinfo *ci = &bus->chipinfo; -	u32 flags = 0; +	u32 flags;  	u32 iost;  	int i; @@ -902,26 +915,36 @@ static void bgmac_chip_reset(struct bgmac *bgmac)  	}  	iost = bcma_aread32(core, BCMA_IOST); -	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) || +	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||  	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || -	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) +	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))  		iost &= ~BGMAC_BCMA_IOST_ATTACHED; -	if (iost & BGMAC_BCMA_IOST_ATTACHED) { -		flags = BGMAC_BCMA_IOCTL_SW_CLKEN; -		if (!bgmac->has_robosw) -			flags |= BGMAC_BCMA_IOCTL_SW_RESET; +	/* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */ +	if (ci->id != BCMA_CHIP_ID_BCM4707) { +		flags = 0; +		if (iost & BGMAC_BCMA_IOST_ATTACHED) { +			flags = BGMAC_BCMA_IOCTL_SW_CLKEN; +			if (!bgmac->has_robosw) +				flags |= BGMAC_BCMA_IOCTL_SW_RESET; +		} +		bcma_core_enable(core, flags);  	} -	bcma_core_enable(core, flags); - -	if (core->id.rev > 2) { -		bgmac_set(bgmac, BCMA_CLKCTLST, 1 
<< 8); -		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24, +	/* Request Misc PLL for corerev > 2 */ +	if (core->id.rev > 2 && +	    ci->id != BCMA_CHIP_ID_BCM4707 && +	    ci->id != BCMA_CHIP_ID_BCM53018) { +		bgmac_set(bgmac, BCMA_CLKCTLST, +			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); +		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, +				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, +				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,  				 1000);  	} -	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 || +	if (ci->id == BCMA_CHIP_ID_BCM5357 || +	    ci->id == BCMA_CHIP_ID_BCM4749 ||  	    ci->id == BCMA_CHIP_ID_BCM53572) {  		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;  		u8 et_swtype = 0; @@ -936,10 +959,11 @@ static void bgmac_chip_reset(struct bgmac *bgmac)  			et_swtype &= 0x0f;  			et_swtype <<= 4;  			sw_type = et_swtype; -		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) { +		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {  			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; -		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) || -			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) { +		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || +			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || +			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {  			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |  				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;  		} @@ -976,8 +1000,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)  			     BGMAC_CMDCFG_PROM |  			     BGMAC_CMDCFG_NLC |  			     BGMAC_CMDCFG_CFE | -			     BGMAC_CMDCFG_SR, +			     BGMAC_CMDCFG_SR(core->id.rev),  			     false); +	bgmac->mac_speed = SPEED_UNKNOWN; +	bgmac->mac_duplex = DUPLEX_UNKNOWN;  	bgmac_clear_mib(bgmac);  	if (core->id.id == BCMA_CORE_4706_MAC_GBIT) @@ -988,6 +1014,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)  	bgmac_miiconfig(bgmac);  	bgmac_phy_init(bgmac); +	netdev_reset_queue(bgmac->net_dev); +  	bgmac->int_status = 0;  } @@ -1015,7 +1043,7 @@ static void bgmac_enable(struct bgmac *bgmac)  	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);  	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE), -			     BGMAC_CMDCFG_SR, true); +			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);  	udelay(2);  	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;  	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg); @@ -1044,12 +1072,16 @@ static void bgmac_enable(struct bgmac *bgmac)  		break;  	} -	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); -	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; -	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000; -	mdp = (bp_clk * 128 / 1000) - 3; -	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); -	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); +	if (ci->id != BCMA_CHIP_ID_BCM4707 && +	    ci->id != BCMA_CHIP_ID_BCM53018) { +		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); +		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; +		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / +				1000000; +		mdp = (bp_clk * 128 / 1000) - 3; +		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); +		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); +	}  }  /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */ @@ -1075,13 +1107,6 @@ static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)  	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN); -	if (!bgmac->autoneg) { -		bgmac_speed(bgmac, bgmac->speed); -		bgmac_phy_force(bgmac); -	} else if (bgmac->speed) { /* if there is anything to adv */ -		
bgmac_phy_advertise(bgmac); -	} -  	if (full_init) {  		bgmac_dma_init(bgmac);  		if (1) /* FIXME: is there any case we don't want IRQs? */ @@ -1171,6 +1196,8 @@ static int bgmac_open(struct net_device *net_dev)  	}  	napi_enable(&bgmac->napi); +	phy_start(bgmac->phy_dev); +  	netif_carrier_on(net_dev);  err_out: @@ -1183,6 +1210,8 @@ static int bgmac_stop(struct net_device *net_dev)  	netif_carrier_off(net_dev); +	phy_stop(bgmac->phy_dev); +  	napi_disable(&bgmac->napi);  	bgmac_chip_intrs_off(bgmac);  	free_irq(bgmac->core->irq, net_dev); @@ -1219,27 +1248,11 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)  static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)  {  	struct bgmac *bgmac = netdev_priv(net_dev); -	struct mii_ioctl_data *data = if_mii(ifr); - -	switch (cmd) { -	case SIOCGMIIPHY: -		data->phy_id = bgmac->phyaddr; -		/* fallthru */ -	case SIOCGMIIREG: -		if (!netif_running(net_dev)) -			return -EAGAIN; -		data->val_out = bgmac_phy_read(bgmac, data->phy_id, -					       data->reg_num & 0x1f); -		return 0; -	case SIOCSMIIREG: -		if (!netif_running(net_dev)) -			return -EAGAIN; -		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f, -				data->val_in); -		return 0; -	default: -		return -EOPNOTSUPP; -	} + +	if (!netif_running(net_dev)) +		return -EINVAL; + +	return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);  }  static const struct net_device_ops bgmac_netdev_ops = { @@ -1261,61 +1274,16 @@ static int bgmac_get_settings(struct net_device *net_dev,  {  	struct bgmac *bgmac = netdev_priv(net_dev); -	cmd->supported = SUPPORTED_10baseT_Half | -			 SUPPORTED_10baseT_Full | -			 SUPPORTED_100baseT_Half | -			 SUPPORTED_100baseT_Full | -			 SUPPORTED_1000baseT_Half | -			 SUPPORTED_1000baseT_Full | -			 SUPPORTED_Autoneg; - -	if (bgmac->autoneg) { -		WARN_ON(cmd->advertising); -		if (bgmac->full_duplex) { -			if (bgmac->speed & BGMAC_SPEED_10) -				cmd->advertising |= ADVERTISED_10baseT_Full; -			if (bgmac->speed & BGMAC_SPEED_100) -				cmd->advertising |= ADVERTISED_100baseT_Full; -			if (bgmac->speed & BGMAC_SPEED_1000) -				cmd->advertising |= ADVERTISED_1000baseT_Full; -		} else { -			if (bgmac->speed & BGMAC_SPEED_10) -				cmd->advertising |= ADVERTISED_10baseT_Half; -			if (bgmac->speed & BGMAC_SPEED_100) -				cmd->advertising |= ADVERTISED_100baseT_Half; -			if (bgmac->speed & BGMAC_SPEED_1000) -				cmd->advertising |= ADVERTISED_1000baseT_Half; -		} -	} else { -		switch (bgmac->speed) { -		case BGMAC_SPEED_10: -			ethtool_cmd_speed_set(cmd, SPEED_10); -			break; -		case BGMAC_SPEED_100: -			ethtool_cmd_speed_set(cmd, SPEED_100); -			break; -		case BGMAC_SPEED_1000: -			ethtool_cmd_speed_set(cmd, SPEED_1000); -			break; -		} -	} - -	cmd->duplex = bgmac->full_duplex ? 
DUPLEX_FULL : DUPLEX_HALF; - -	cmd->autoneg = bgmac->autoneg; - -	return 0; +	return phy_ethtool_gset(bgmac->phy_dev, cmd);  } -#if 0  static int bgmac_set_settings(struct net_device *net_dev,  			      struct ethtool_cmd *cmd)  {  	struct bgmac *bgmac = netdev_priv(net_dev); -	return -1; +	return phy_ethtool_sset(bgmac->phy_dev, cmd);  } -#endif  static void bgmac_get_drvinfo(struct net_device *net_dev,  			      struct ethtool_drvinfo *info) @@ -1326,6 +1294,7 @@ static void bgmac_get_drvinfo(struct net_device *net_dev,  static const struct ethtool_ops bgmac_ethtool_ops = {  	.get_settings		= bgmac_get_settings, +	.set_settings		= bgmac_set_settings,  	.get_drvinfo		= bgmac_get_drvinfo,  }; @@ -1344,9 +1313,35 @@ static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,  	return bgmac_phy_write(bus->priv, mii_id, regnum, value);  } +static void bgmac_adjust_link(struct net_device *net_dev) +{ +	struct bgmac *bgmac = netdev_priv(net_dev); +	struct phy_device *phy_dev = bgmac->phy_dev; +	bool update = false; + +	if (phy_dev->link) { +		if (phy_dev->speed != bgmac->mac_speed) { +			bgmac->mac_speed = phy_dev->speed; +			update = true; +		} + +		if (phy_dev->duplex != bgmac->mac_duplex) { +			bgmac->mac_duplex = phy_dev->duplex; +			update = true; +		} +	} + +	if (update) { +		bgmac_mac_speed(bgmac); +		phy_print_status(phy_dev); +	} +} +  static int bgmac_mii_register(struct bgmac *bgmac)  {  	struct mii_bus *mii_bus; +	struct phy_device *phy_dev; +	char bus_id[MII_BUS_ID_SIZE + 3];  	int i, err = 0;  	mii_bus = mdiobus_alloc(); @@ -1378,8 +1373,22 @@ static int bgmac_mii_register(struct bgmac *bgmac)  	bgmac->mii_bus = mii_bus; +	/* Connect to the PHY */ +	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, +		 bgmac->phyaddr); +	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, +			      PHY_INTERFACE_MODE_MII); +	if (IS_ERR(phy_dev)) { +		bgmac_err(bgmac, "PHY connection failed\n"); +		err = PTR_ERR(phy_dev); +		goto err_unregister_bus; +	} +	bgmac->phy_dev = phy_dev; +  	return err; +err_unregister_bus: +	mdiobus_unregister(mii_bus);  err_free_irq:  	kfree(mii_bus->irq);  err_free_bus: @@ -1427,16 +1436,13 @@ static int bgmac_probe(struct bcma_device *core)  		return -ENOMEM;  	net_dev->netdev_ops = &bgmac_netdev_ops;  	net_dev->irq = core->irq; -	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops); +	net_dev->ethtool_ops = &bgmac_ethtool_ops;  	bgmac = netdev_priv(net_dev);  	bgmac->net_dev = net_dev;  	bgmac->core = core;  	bcma_set_drvdata(core, bgmac);  	/* Defaults */ -	bgmac->autoneg = true; -	bgmac->full_duplex = true; -	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;  	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);  	/* On BCM4706 we need common core to access PHY */ @@ -1467,6 +1473,27 @@ static int bgmac_probe(struct bcma_device *core)  	bgmac_chip_reset(bgmac); +	/* For Northstar, we have to take all GMAC cores out of reset */ +	if (core->id.id == BCMA_CHIP_ID_BCM4707 || +	    core->id.id == BCMA_CHIP_ID_BCM53018) { +		struct bcma_device *ns_core; +		int ns_gmac; + +		/* Northstar has 4 GMAC cores */ +		for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) { +			/* Northstar requires all GMACs to be reset before +			 * any of them is accessed. bgmac_chip_reset() calls +			 * bcma_core_enable() for this core only, so the other +			 * three GMACs are reset here. 
+			 */ +			ns_core = bcma_find_core_unit(core->bus, +						      BCMA_CORE_MAC_GBIT, +						      ns_gmac); +			if (ns_core && !bcma_core_is_enabled(ns_core)) +				bcma_core_enable(ns_core, 0); +		} +	} +  	err = bgmac_dma_alloc(bgmac);  	if (err) {  		bgmac_err(bgmac, "Unable to alloc memory for DMA\n"); @@ -1491,14 +1518,12 @@ static int bgmac_probe(struct bcma_device *core)  	err = bgmac_mii_register(bgmac);  	if (err) {  		bgmac_err(bgmac, "Cannot register MDIO\n"); -		err = -ENOTSUPP;  		goto err_dma_free;  	}  	err = register_netdev(bgmac->net_dev);  	if (err) {  		bgmac_err(bgmac, "Cannot register net device\n"); -		err = -ENOTSUPP;  		goto err_mii_unregister;  	} diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 66c8afbdc8c..89fa5bc69c5 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -95,7 +95,11 @@  #define  BGMAC_RXQ_CTL_MDP_SHIFT		24  #define BGMAC_GPIO_SELECT			0x194  #define BGMAC_GPIO_OUTPUT_EN			0x198 -/* For 0x1e0 see BCMA_CLKCTLST */ + +/* For 0x1e0 see BCMA_CLKCTLST. Below are BGMAC specific bits */ +#define  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ	0x00000100 +#define  BGMAC_BCMA_CLKCTLST_MISC_PLL_ST	0x01000000 +  #define BGMAC_HW_WAR				0x1e4  #define BGMAC_PWR_CTL				0x1e8  #define BGMAC_DMA_BASE0				0x200		/* Tx and Rx controller */ @@ -185,6 +189,7 @@  #define   BGMAC_CMDCFG_ES_10			0x00000000  #define   BGMAC_CMDCFG_ES_100			0x00000004  #define   BGMAC_CMDCFG_ES_1000			0x00000008 +#define   BGMAC_CMDCFG_ES_2500			0x0000000C  #define  BGMAC_CMDCFG_PROM			0x00000010	/* Set to activate promiscuous mode */  #define  BGMAC_CMDCFG_PAD_EN			0x00000020  #define  BGMAC_CMDCFG_CF			0x00000040 @@ -193,7 +198,9 @@  #define  BGMAC_CMDCFG_TAI			0x00000200  #define  BGMAC_CMDCFG_HD			0x00000400	/* Set if in half duplex mode */  #define  BGMAC_CMDCFG_HD_SHIFT			10 -#define  BGMAC_CMDCFG_SR			0x00000800	/* Set to reset mode */ +#define  BGMAC_CMDCFG_SR_REV0			0x00000800	/* Set to reset mode, for other revs */ +#define  BGMAC_CMDCFG_SR_REV4			0x00002000	/* Set to reset mode, only for core rev 4 */ +#define  BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? 
BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)  #define  BGMAC_CMDCFG_ML			0x00008000	/* Set to activate mac loopback mode */  #define  BGMAC_CMDCFG_AE			0x00400000  #define  BGMAC_CMDCFG_CFE			0x00800000 @@ -216,27 +223,6 @@  #define BGMAC_RX_STATUS				0xb38  #define BGMAC_TX_STATUS				0xb3c -#define BGMAC_PHY_CTL				0x00 -#define  BGMAC_PHY_CTL_SPEED_MSB		0x0040 -#define  BGMAC_PHY_CTL_DUPLEX			0x0100		/* duplex mode */ -#define  BGMAC_PHY_CTL_RESTART			0x0200		/* restart autonegotiation */ -#define  BGMAC_PHY_CTL_ANENAB			0x1000		/* enable autonegotiation */ -#define  BGMAC_PHY_CTL_SPEED			0x2000 -#define  BGMAC_PHY_CTL_LOOP			0x4000		/* loopback */ -#define  BGMAC_PHY_CTL_RESET			0x8000		/* reset */ -/* Helpers */ -#define  BGMAC_PHY_CTL_SPEED_10			0 -#define  BGMAC_PHY_CTL_SPEED_100		BGMAC_PHY_CTL_SPEED -#define  BGMAC_PHY_CTL_SPEED_1000		BGMAC_PHY_CTL_SPEED_MSB -#define BGMAC_PHY_ADV				0x04 -#define  BGMAC_PHY_ADV_10HALF			0x0020		/* advertise 10MBits/s half duplex */ -#define  BGMAC_PHY_ADV_10FULL			0x0040		/* advertise 10MBits/s full duplex */ -#define  BGMAC_PHY_ADV_100HALF			0x0080		/* advertise 100MBits/s half duplex */ -#define  BGMAC_PHY_ADV_100FULL			0x0100		/* advertise 100MBits/s full duplex */ -#define BGMAC_PHY_ADV2				0x09 -#define  BGMAC_PHY_ADV2_1000HALF		0x0100		/* advertise 1000MBits/s half duplex */ -#define  BGMAC_PHY_ADV2_1000FULL		0x0200		/* advertise 1000MBits/s full duplex */ -  /* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */  #define BGMAC_BCMA_IOCTL_SW_CLKEN		0x00000004	/* PHY Clock Enable */  #define BGMAC_BCMA_IOCTL_SW_RESET		0x00000008	/* PHY Reset */ @@ -254,9 +240,34 @@  #define  BGMAC_DMA_TX_SUSPEND			0x00000002  #define  BGMAC_DMA_TX_LOOPBACK			0x00000004  #define  BGMAC_DMA_TX_FLUSH			0x00000010 +#define  BGMAC_DMA_TX_MR_MASK			0x000000C0	/* Multiple outstanding reads */ +#define  BGMAC_DMA_TX_MR_SHIFT			6 +#define   BGMAC_DMA_TX_MR_1			0 +#define   BGMAC_DMA_TX_MR_2			1  #define  BGMAC_DMA_TX_PARITY_DISABLE		0x00000800  #define  BGMAC_DMA_TX_ADDREXT_MASK		0x00030000  #define  BGMAC_DMA_TX_ADDREXT_SHIFT		16 +#define  BGMAC_DMA_TX_BL_MASK			0x001C0000	/* BurstLen bits */ +#define  BGMAC_DMA_TX_BL_SHIFT			18 +#define   BGMAC_DMA_TX_BL_16			0 +#define   BGMAC_DMA_TX_BL_32			1 +#define   BGMAC_DMA_TX_BL_64			2 +#define   BGMAC_DMA_TX_BL_128			3 +#define   BGMAC_DMA_TX_BL_256			4 +#define   BGMAC_DMA_TX_BL_512			5 +#define   BGMAC_DMA_TX_BL_1024			6 +#define  BGMAC_DMA_TX_PC_MASK			0x00E00000	/* Prefetch control */ +#define  BGMAC_DMA_TX_PC_SHIFT			21 +#define   BGMAC_DMA_TX_PC_0			0 +#define   BGMAC_DMA_TX_PC_4			1 +#define   BGMAC_DMA_TX_PC_8			2 +#define   BGMAC_DMA_TX_PC_16			3 +#define  BGMAC_DMA_TX_PT_MASK			0x03000000	/* Prefetch threshold */ +#define  BGMAC_DMA_TX_PT_SHIFT			24 +#define   BGMAC_DMA_TX_PT_1			0 +#define   BGMAC_DMA_TX_PT_2			1 +#define   BGMAC_DMA_TX_PT_4			2 +#define   BGMAC_DMA_TX_PT_8			3  #define BGMAC_DMA_TX_INDEX			0x04  #define BGMAC_DMA_TX_RINGLO			0x08  #define BGMAC_DMA_TX_RINGHI			0x0C @@ -284,8 +295,33 @@  #define  BGMAC_DMA_RX_DIRECT_FIFO		0x00000100  #define  BGMAC_DMA_RX_OVERFLOW_CONT		0x00000400  #define  BGMAC_DMA_RX_PARITY_DISABLE		0x00000800 +#define  BGMAC_DMA_RX_MR_MASK			0x000000C0	/* Multiple outstanding reads */ +#define  BGMAC_DMA_RX_MR_SHIFT			6 +#define   BGMAC_DMA_TX_MR_1			0 +#define   BGMAC_DMA_TX_MR_2			1  #define  BGMAC_DMA_RX_ADDREXT_MASK		0x00030000  #define  BGMAC_DMA_RX_ADDREXT_SHIFT		16 +#define  BGMAC_DMA_RX_BL_MASK			0x001C0000	/* BurstLen bits */ +#define  BGMAC_DMA_RX_BL_SHIFT			18 
+#define   BGMAC_DMA_RX_BL_16			0 +#define   BGMAC_DMA_RX_BL_32			1 +#define   BGMAC_DMA_RX_BL_64			2 +#define   BGMAC_DMA_RX_BL_128			3 +#define   BGMAC_DMA_RX_BL_256			4 +#define   BGMAC_DMA_RX_BL_512			5 +#define   BGMAC_DMA_RX_BL_1024			6 +#define  BGMAC_DMA_RX_PC_MASK			0x00E00000	/* Prefetch control */ +#define  BGMAC_DMA_RX_PC_SHIFT			21 +#define   BGMAC_DMA_RX_PC_0			0 +#define   BGMAC_DMA_RX_PC_4			1 +#define   BGMAC_DMA_RX_PC_8			2 +#define   BGMAC_DMA_RX_PC_16			3 +#define  BGMAC_DMA_RX_PT_MASK			0x03000000	/* Prefetch threshold */ +#define  BGMAC_DMA_RX_PT_SHIFT			24 +#define   BGMAC_DMA_RX_PT_1			0 +#define   BGMAC_DMA_RX_PT_2			1 +#define   BGMAC_DMA_RX_PT_4			2 +#define   BGMAC_DMA_RX_PT_8			3  #define BGMAC_DMA_RX_INDEX			0x24  #define BGMAC_DMA_RX_RINGLO			0x28  #define BGMAC_DMA_RX_RINGHI			0x2C @@ -342,10 +378,6 @@  #define BGMAC_CHIPCTL_1_SW_TYPE_RGMII		0x000000C0  #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS		0x00010000 -#define BGMAC_SPEED_10				0x0001 -#define BGMAC_SPEED_100				0x0002 -#define BGMAC_SPEED_1000			0x0004 -  #define BGMAC_WEIGHT	64  #define ETHER_MAX_LEN   1518 @@ -402,6 +434,7 @@ struct bgmac {  	struct net_device *net_dev;  	struct napi_struct napi;  	struct mii_bus *mii_bus; +	struct phy_device *phy_dev;  	/* DMA */  	struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; @@ -416,10 +449,9 @@ struct bgmac {  	u32 int_mask;  	u32 int_status; -	/* Speed-related */ -	int speed; -	bool autoneg; -	bool full_duplex; +	/* Current MAC state */ +	int mac_speed; +	int mac_duplex;  	u8 phyaddr;  	bool has_robosw; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e838a3f74b6..67d2b004737 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -23,7 +23,6 @@  #include <linux/vmalloc.h>  #include <linux/interrupt.h>  #include <linux/pci.h> -#include <linux/init.h>  #include <linux/netdevice.h>  #include <linux/etherdevice.h>  #include <linux/skbuff.h> @@ -58,8 +57,8 @@  #include "bnx2_fw.h"  #define DRV_MODULE_NAME		"bnx2" -#define DRV_MODULE_VERSION	"2.2.4" -#define DRV_MODULE_RELDATE	"Aug 05, 2013" +#define DRV_MODULE_VERSION	"2.2.5" +#define DRV_MODULE_RELDATE	"December 20, 2013"  #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"  #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"  #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw" @@ -86,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);  static int disable_msi = 0; -module_param(disable_msi, int, 0); +module_param(disable_msi, int, S_IRUGO);  MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");  typedef enum { @@ -1197,6 +1196,8 @@ bnx2_copper_linkup(struct bnx2 *bp)  {  	u32 bmcr; +	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX; +  	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);  	if (bmcr & BMCR_ANENABLE) {  		u32 local_adv, remote_adv, common; @@ -1255,6 +1256,14 @@ bnx2_copper_linkup(struct bnx2 *bp)  		}  	} +	if (bp->link_up) { +		u32 ext_status; + +		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status); +		if (ext_status & EXT_STATUS_MDIX) +			bp->phy_flags |= BNX2_PHY_FLAG_MDIX; +	} +  	return 0;  } @@ -2048,29 +2057,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp)  __releases(&bp->phy_lock)  __acquires(&bp->phy_lock)  { -	u32 bmcr; +	u32 bmcr, adv_reg, new_adv = 0;  	u32 new_bmcr;  	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); +	bnx2_read_phy(bp, bp->mii_adv, &adv_reg); +	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | +		    ADVERTISE_PAUSE_ASYM); + +	new_adv = ADVERTISE_CSMA | 
ethtool_adv_to_mii_adv_t(bp->advertising); +  	if (bp->autoneg & AUTONEG_SPEED) { -		u32 adv_reg, adv1000_reg; -		u32 new_adv = 0; +		u32 adv1000_reg;  		u32 new_adv1000 = 0; -		bnx2_read_phy(bp, bp->mii_adv, &adv_reg); -		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | -			ADVERTISE_PAUSE_ASYM); +		new_adv |= bnx2_phy_get_pause_adv(bp);  		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);  		adv1000_reg &= PHY_ALL_1000_SPEED; -		new_adv = ethtool_adv_to_mii_adv_t(bp->advertising); -		new_adv |= ADVERTISE_CSMA; -		new_adv |= bnx2_phy_get_pause_adv(bp); -  		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising); -  		if ((adv1000_reg != new_adv1000) ||  			(adv_reg != new_adv) ||  			((bmcr & BMCR_ANENABLE) == 0)) { @@ -2090,6 +2097,10 @@ __acquires(&bp->phy_lock)  		return 0;  	} +	/* advertise nothing when forcing speed */ +	if (adv_reg != new_adv) +		bnx2_write_phy(bp, bp->mii_adv, new_adv); +  	new_bmcr = 0;  	if (bp->req_line_speed == SPEED_100) {  		new_bmcr |= BMCR_SPEED100; @@ -2341,9 +2352,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)  	}  	/* ethernet@wirespeed */ -	bnx2_write_phy(bp, 0x18, 0x7007); -	bnx2_read_phy(bp, 0x18, &val); -	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4)); +	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL); +	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val); +	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED; + +	/* auto-mdix */ +	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) +		val |=  AUX_CTL_MISC_CTL_AUTOMDIX; + +	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);  	return 0;  } @@ -2490,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)  	bp->fw_wr_seq++;  	msg_data |= bp->fw_wr_seq; +	bp->fw_last_msg = msg_data;  	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); @@ -2868,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)  		sw_cons = BNX2_NEXT_TX_BD(sw_cons);  		tx_bytes += skb->len; -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		tx_pkt++;  		if (tx_pkt == budget)  			break; @@ -3115,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)  	struct l2_fhdr *rx_hdr;  	int rx_pkt = 0, pg_ring_used = 0; +	if (budget <= 0) +		return rx_pkt; +  	hw_cons = bnx2_get_hw_rx_cons(bnapi);  	sw_cons = rxr->rx_cons;  	sw_prod = rxr->rx_prod; @@ -3234,7 +3255,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)  		if ((bp->dev->features & NETIF_F_RXHASH) &&  		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==  		     L2_FHDR_STATUS_USE_RXHASH)) -			skb->rxhash = rx_hdr->l2_fhdr_hash; +			skb_set_hash(skb, rx_hdr->l2_fhdr_hash, +				     PKT_HASH_TYPE_L3);  		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);  		napi_gro_receive(&bnapi->napi, skb); @@ -3982,8 +4004,23 @@ bnx2_setup_wol(struct bnx2 *bp)  			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;  	} -	if (!(bp->flags & BNX2_FLAG_NO_WOL)) -		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); +	if (!(bp->flags & BNX2_FLAG_NO_WOL)) { +		u32 val; + +		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; +		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { +			bnx2_fw_sync(bp, wol_msg, 1, 0); +			return; +		} +		/* Tell firmware not to power down the PHY yet, otherwise +		 * the chip will take a long time to respond to MMIO reads. 
+		 */ +		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); +		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, +			      val | BNX2_PORT_FEATURE_ASF_ENABLED); +		bnx2_fw_sync(bp, wol_msg, 1, 0); +		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); +	}  } @@ -4015,9 +4052,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)  			if (bp->wol)  				pci_set_power_state(bp->pdev, PCI_D3hot); -		} else { -			pci_set_power_state(bp->pdev, PCI_D3hot); +			break; + +		} +		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { +			u32 val; + +			/* Tell firmware not to power down the PHY yet, +			 * otherwise the other port may not respond to +			 * MMIO reads. +			 */ +			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); +			val &= ~BNX2_CONDITION_PM_STATE_MASK; +			val |= BNX2_CONDITION_PM_STATE_UNPREP; +			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);  		} +		pci_set_power_state(bp->pdev, PCI_D3hot);  		/* No more memory access after this point until  		 * device is brought back to D0. @@ -5761,8 +5811,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)  	if (!skb)  		return -ENOMEM;  	packet = skb_put(skb, pkt_size); -	memcpy(packet, bp->dev->dev_addr, 6); -	memset(packet + 6, 0x0, 8); +	memcpy(packet, bp->dev->dev_addr, ETH_ALEN); +	memset(packet + ETH_ALEN, 0x0, 8);  	for (i = 14; i < pkt_size; i++)  		packet[i] = (unsigned char) (i & 0xff); @@ -6188,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp)  static void  bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)  { -	int i, total_vecs, rc; +	int i, total_vecs;  	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];  	struct net_device *dev = bp->dev;  	const int len = sizeof(bp->irq_tbl[0].name); @@ -6211,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)  #ifdef BCM_CNIC  	total_vecs++;  #endif -	rc = -ENOSPC; -	while (total_vecs >= BNX2_MIN_MSIX_VEC) { -		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); -		if (rc <= 0) -			break; -		if (rc > 0) -			total_vecs = rc; -	} - -	if (rc != 0) +	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, +					   BNX2_MIN_MSIX_VEC, total_vecs); +	if (total_vecs < 0)  		return;  	msix_vecs = total_vecs; @@ -6593,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)  	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);  	if (dma_mapping_error(&bp->pdev->dev, mapping)) { -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		return NETDEV_TX_OK;  	} @@ -6686,7 +6729,7 @@ dma_error:  			       PCI_DMA_TODEVICE);  	} -	dev_kfree_skb(skb); +	dev_kfree_skb_any(skb);  	return NETDEV_TX_OK;  } @@ -6865,10 +6908,16 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)  	if (netif_carrier_ok(dev)) {  		ethtool_cmd_speed_set(cmd, bp->line_speed);  		cmd->duplex = bp->duplex; +		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) { +			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX) +				cmd->eth_tp_mdix = ETH_TP_MDI_X; +			else +				cmd->eth_tp_mdix = ETH_TP_MDI; +		}  	}  	else { -		ethtool_cmd_speed_set(cmd, -1); -		cmd->duplex = -1; +		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); +		cmd->duplex = DUPLEX_UNKNOWN;  	}  	spin_unlock_bh(&bp->phy_lock); @@ -8413,7 +8462,6 @@ err_out_release:  err_out_disable:  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  err_out:  	return rc; @@ -8514,7 +8562,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  	pci_set_drvdata(pdev, dev); -	memcpy(dev->dev_addr, bp->mac_addr, 6); +	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);  	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |  		NETIF_F_TSO | 
NETIF_F_TSO_ECN | @@ -8546,7 +8594,6 @@ error:  	pci_iounmap(pdev, bp->regview);  	pci_release_regions(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  err_free:  	free_netdev(dev);  	return rc; @@ -8578,9 +8625,9 @@ bnx2_remove_one(struct pci_dev *pdev)  	pci_release_regions(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  } +#ifdef CONFIG_PM_SLEEP  static int  bnx2_suspend(struct device *device)  { @@ -8619,7 +8666,6 @@ bnx2_resume(struct device *device)  	return 0;  } -#ifdef CONFIG_PM_SLEEP  static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);  #define BNX2_PM_OPS (&bnx2_pm_ops) diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index 18cb2d23e56..e341bc366fa 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -6471,6 +6471,15 @@ struct l2_fhdr {  #define BCM5708S_TX_ACTL3			0x17 +#define MII_BNX2_EXT_STATUS			0x11 +#define EXT_STATUS_MDIX				 (1 << 13) + +#define MII_BNX2_AUX_CTL			0x18 +#define AUX_CTL_MISC_CTL			 0x7007 +#define AUX_CTL_MISC_CTL_WIRESPEED		  (1 << 4) +#define AUX_CTL_MISC_CTL_AUTOMDIX		  (1 << 9) +#define AUX_CTL_MISC_CTL_WR			  (1 << 15) +  #define MII_BNX2_DSP_RW_PORT			0x15  #define MII_BNX2_DSP_ADDRESS			0x17  #define MII_BNX2_DSP_EXPAND_REG			 0x0f00 @@ -6844,6 +6853,7 @@ struct bnx2 {  #define BNX2_PHY_FLAG_REMOTE_PHY_CAP		0x00000800  #define BNX2_PHY_FLAG_FORCED_DOWN		0x00001000  #define BNX2_PHY_FLAG_NO_PARALLEL		0x00002000 +#define BNX2_PHY_FLAG_MDIX			0x00004000  	u32			mii_bmcr;  	u32			mii_bmsr; @@ -6890,6 +6900,7 @@ struct bnx2 {  	u16			fw_wr_seq;  	u16			fw_drv_pulse_wr_seq; +	u32			fw_last_msg;  	int			rx_max_ring;  	int			rx_ring_size; @@ -7396,6 +7407,10 @@ struct bnx2_rv2p_fw_file {  #define BNX2_CONDITION_MFW_RUN_NCSI		 0x00006000  #define BNX2_CONDITION_MFW_RUN_NONE		 0x0000e000  #define BNX2_CONDITION_MFW_RUN_MASK		 0x0000e000 +#define BNX2_CONDITION_PM_STATE_MASK		 0x00030000 +#define BNX2_CONDITION_PM_STATE_FULL		 0x00030000 +#define BNX2_CONDITION_PM_STATE_PREP		 0x00020000 +#define BNX2_CONDITION_PM_STATE_UNPREP		 0x00010000  #define BNX2_BC_STATE_DEBUG_CMD			0x1dc  #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE	 0x42440000 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 97b3d32a98b..8206a293e6b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   */ @@ -26,8 +26,8 @@   * (you will need to reboot afterwards) */  /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION      "1.78.17-0" -#define DRV_MODULE_RELDATE      "2013/04/11" +#define DRV_MODULE_VERSION      "1.78.19-0" +#define DRV_MODULE_RELDATE      "2014/02/10"  #define BNX2X_BC_VER            0x040200  #if defined(CONFIG_DCB) @@ -75,13 +75,22 @@ enum bnx2x_int_mode {  #define BNX2X_MSG_DCB			0x8000000  /* regular debug print */ +#define DP_INNER(fmt, ...)					\ +	pr_notice("[%s:%d(%s)]" fmt,				\ +		  __func__, __LINE__,				\ +		  bp->dev ? (bp->dev->name) : "?",		\ +		  ##__VA_ARGS__); +  #define DP(__mask, fmt, ...)					
\  do {								\  	if (unlikely(bp->msg_enable & (__mask)))		\ -		pr_notice("[%s:%d(%s)]" fmt,			\ -			  __func__, __LINE__,			\ -			  bp->dev ? (bp->dev->name) : "?",	\ -			  ##__VA_ARGS__);			\ +		DP_INNER(fmt, ##__VA_ARGS__);			\ +} while (0) + +#define DP_AND(__mask, fmt, ...)				\ +do {								\ +	if (unlikely((bp->msg_enable & (__mask)) == __mask))	\ +		DP_INNER(fmt, ##__VA_ARGS__);			\  } while (0)  #define DP_CONT(__mask, fmt, ...)				\ @@ -337,6 +346,7 @@ struct sw_tx_bd {  	u8		flags;  /* Set on the first BD descriptor when there is a split BD */  #define BNX2X_TSO_SPLIT_BD		(1<<0) +#define BNX2X_HAS_SECOND_PBD		(1<<1)  };  struct sw_rx_page { @@ -472,7 +482,7 @@ struct bnx2x_agg_info {  	u16			vlan_tag;  	u16			len_on_bd;  	u32			rxhash; -	bool			l4_rxhash; +	enum pkt_hash_types	rxhash_type;  	u16			gro_size;  	u16			full_page;  }; @@ -520,10 +530,12 @@ struct bnx2x_fastpath {  #define BNX2X_FP_STATE_IDLE		      0  #define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */  #define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */ -#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 2)    /* NAPI yielded this FP */ -#define BNX2X_FP_STATE_POLL_YIELD	(1 << 3)    /* poll yielded this FP */ +#define BNX2X_FP_STATE_DISABLED		(1 << 2) +#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */ +#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)  #define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) -#define BNX2X_FP_LOCKED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)  #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)  	/* protect state */  	spinlock_t lock; @@ -613,7 +625,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)  {  	bool rc = true; -	spin_lock(&fp->lock); +	spin_lock_bh(&fp->lock);  	if (fp->state & BNX2X_FP_LOCKED) {  		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);  		fp->state |= BNX2X_FP_STATE_NAPI_YIELD; @@ -622,7 +634,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)  		/* we don't care if someone yielded */  		fp->state = BNX2X_FP_STATE_NAPI;  	} -	spin_unlock(&fp->lock); +	spin_unlock_bh(&fp->lock);  	return rc;  } @@ -631,14 +643,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)  {  	bool rc = false; -	spin_lock(&fp->lock); +	spin_lock_bh(&fp->lock);  	WARN_ON(fp->state &  		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));  	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)  		rc = true; -	fp->state = BNX2X_FP_STATE_IDLE; -	spin_unlock(&fp->lock); + +	/* state ==> idle, unless currently disabled */ +	fp->state &= BNX2X_FP_STATE_DISABLED; +	spin_unlock_bh(&fp->lock);  	return rc;  } @@ -669,7 +683,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)  	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)  		rc = true; -	fp->state = BNX2X_FP_STATE_IDLE; + +	/* state ==> idle, unless currently disabled */ +	fp->state &= BNX2X_FP_STATE_DISABLED;  	spin_unlock_bh(&fp->lock);  	return rc;  } @@ -677,9 +693,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)  /* true if a socket is polling, even if it did not get the lock */  static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)  { -	WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); +	WARN_ON(!(fp->state & BNX2X_FP_OWNED));  	return fp->state & BNX2X_FP_USER_PEND;  } + +/* false if fp is currently owned */ +static 
inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ +	int rc = true; + +	spin_lock_bh(&fp->lock); +	if (fp->state & BNX2X_FP_OWNED) +		rc = false; +	fp->state |= BNX2X_FP_STATE_DISABLED; +	spin_unlock_bh(&fp->lock); + +	return rc; +}  #else  static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)  { @@ -709,6 +739,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)  {  	return false;  } +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ +	return true; +}  #endif /* CONFIG_NET_RX_BUSY_POLL */  /* Use 2500 as a mini-jumbo MTU for FCoE */ @@ -1122,10 +1156,6 @@ struct bnx2x_port {  			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)  /* slow path */ - -/* slow path work-queue */ -extern struct workqueue_struct *bnx2x_wq; -  #define BNX2X_MAX_NUM_OF_VFS	64  #define BNX2X_VF_CID_WND	4 /* log num of queues per VF. HW config. */  #define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND) @@ -1197,8 +1227,9 @@ union cdu_context {  /* TM (timers) host DB constants */  #define TM_ILT_PAGE_SZ_HW	0  #define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ -/* #define TM_CONN_NUM		(CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */ -#define TM_CONN_NUM		1024 +#define TM_CONN_NUM		(BNX2X_FIRST_VF_CID + \ +				 BNX2X_VF_CIDS + \ +				 CNIC_ISCSI_CID_MAX)  #define TM_ILT_SZ		(8 * TM_CONN_NUM)  #define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ) @@ -1236,6 +1267,7 @@ struct bnx2x_slowpath {  	union {  		struct client_init_ramrod_data  init_data;  		struct client_update_ramrod_data update_data; +		struct tpa_update_ramrod_data tpa_data;  	} q_rdata;  	union { @@ -1249,7 +1281,10 @@ struct bnx2x_slowpath {  	 * Therefore, if they would have been defined in the same union,  	 * data can get corrupted.  	 */ -	struct afex_vif_list_ramrod_data func_afex_rdata; +	union { +		struct afex_vif_list_ramrod_data	viflist_data; +		struct function_update_data		func_update; +	} func_afex_rdata;  	/* used by dmae command executer */  	struct dmae_command		dmae[MAX_DMAE_C]; @@ -1364,7 +1399,7 @@ struct bnx2x_fw_stats_data {  };  /* Public slow path states */ -enum { +enum sp_rtnl_flag {  	BNX2X_SP_RTNL_SETUP_TC,  	BNX2X_SP_RTNL_TX_TIMEOUT,  	BNX2X_SP_RTNL_FAN_FAILURE, @@ -1375,7 +1410,12 @@ enum {  	BNX2X_SP_RTNL_RX_MODE,  	BNX2X_SP_RTNL_HYPERVISOR_VLAN,  	BNX2X_SP_RTNL_TX_STOP, -	BNX2X_SP_RTNL_TX_RESUME, +	BNX2X_SP_RTNL_GET_DRV_VERSION, +}; + +enum bnx2x_iov_flag { +	BNX2X_IOV_HANDLE_VF_MSG, +	BNX2X_IOV_HANDLE_FLR,  };  struct bnx2x_prev_path_list { @@ -1527,7 +1567,6 @@ struct bnx2x {  #define PCI_32BIT_FLAG			(1 << 1)  #define ONE_PORT_FLAG			(1 << 2)  #define NO_WOL_FLAG			(1 << 3) -#define USING_DAC_FLAG			(1 << 4)  #define USING_MSIX_FLAG			(1 << 5)  #define USING_MSI_FLAG			(1 << 6)  #define DISABLE_MSI_FLAG		(1 << 7) @@ -1540,12 +1579,15 @@ struct bnx2x {  #define NO_ISCSI_FLAG			(1 << 14)  #define NO_FCOE_FLAG			(1 << 15)  #define BC_SUPPORTS_PFC_STATS		(1 << 17) +#define TX_SWITCHING			(1 << 18)  #define BC_SUPPORTS_FCOE_FEATURES	(1 << 19)  #define USING_SINGLE_MSIX_FLAG		(1 << 20)  #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)  #define IS_VF_FLAG			(1 << 22)  #define INTERRUPTS_ENABLED_FLAG		(1 << 23)  #define BC_SUPPORTS_RMMOD_CMD		(1 << 24) +#define HAS_PHYS_PORT_ID		(1 << 25) +#define AER_ENABLED			(1 << 26)  #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG) @@ -1574,6 +1616,8 @@ struct bnx2x {  	int			mrrs;  	struct delayed_work	sp_task; +	struct delayed_work	iov_task; +  	atomic_t		interrupt_occurred;  	struct delayed_work	sp_rtnl_task; @@ -1621,7 
+1665,7 @@ struct bnx2x {  	u16			rx_ticks_int;  	u16			rx_ticks;  /* Maximal coalescing timeout in us */ -#define BNX2X_MAX_COALESCE_TOUT		(0xf0*12) +#define BNX2X_MAX_COALESCE_TOUT		(0xff*BNX2X_BTR)  	u32			lin_cnt; @@ -1664,6 +1708,10 @@ struct bnx2x {  	struct bnx2x_slowpath	*slowpath;  	dma_addr_t		slowpath_mapping; +	/* Mechanism protecting the drv_info_to_mcp */ +	struct mutex		drv_info_mutex; +	bool			drv_info_mng_owner; +  	/* Total number of FW statistics requests */  	u8			fw_stats_num; @@ -1853,6 +1901,9 @@ struct bnx2x {  	/* operation indication for the sp_rtnl task */  	unsigned long				sp_rtnl_state; +	/* Indication of the IOV tasks */ +	unsigned long				iov_task_state; +  	/* DCBX Negotiation results */  	struct dcbx_features			dcbx_local_feat;  	u32					dcbx_error; @@ -1876,6 +1927,8 @@ struct bnx2x {  	u32 dump_preset_idx;  	bool					stats_started;  	struct semaphore			stats_sema; + +	u8					phys_port_id[ETH_ALEN];  };  /* Tx queues may be less or equal to Rx queues */ @@ -2051,7 +2104,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,  void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);  void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,  		    u8 vf_valid, int fw_sb_id, int igu_sb_id); -u32 bnx2x_get_pretend_reg(struct bnx2x *bp);  int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);  int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);  int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode); @@ -2072,7 +2124,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,  void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,  			       u8 src_type, u8 dst_type); -int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae); +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, +			       u32 *comp);  /* FLR related routines */  u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp); @@ -2231,7 +2284,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,  #define BNX2X_NUM_TESTS_SF		7  #define BNX2X_NUM_TESTS_MF		3  #define BNX2X_NUM_TESTS(bp)		(IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ -						     BNX2X_NUM_TESTS_SF) +					     IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)  #define BNX2X_PHY_LOOPBACK		0  #define BNX2X_MAC_LOOPBACK		1 @@ -2433,7 +2486,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,  #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \  			    (!((me_reg) & ME_REG_VF_ERR))) -int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code); +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err); +  /* Congestion management fairness mode */  #define CMNG_FNS_NONE			0  #define CMNG_FNS_MINMAX			1 @@ -2491,11 +2545,13 @@ enum {  #define NUM_MACS	8 -enum bnx2x_pci_bus_speed { -	BNX2X_PCI_LINK_SPEED_2500 = 2500, -	BNX2X_PCI_LINK_SPEED_5000 = 5000, -	BNX2X_PCI_LINK_SPEED_8000 = 8000 -}; -  void bnx2x_set_local_cmng(struct bnx2x *bp); + +void bnx2x_update_mng_version(struct bnx2x *bp); + +#define MCPR_SCRATCH_BASE(bp) \ +	(CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + +#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) +  #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e66beff2704..c43e7238de2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   * UDP CSUM errata workaround by Arik Gendelman @@ -30,6 +30,47 @@  #include "bnx2x_init.h"  #include "bnx2x_sp.h" +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); +static int bnx2x_alloc_fp_mem(struct bnx2x *bp); +static int bnx2x_poll(struct napi_struct *napi, int budget); + +static void bnx2x_add_all_napi_cnic(struct bnx2x *bp) +{ +	int i; + +	/* Add NAPI objects */ +	for_each_rx_queue_cnic(bp, i) { +		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), +			       bnx2x_poll, NAPI_POLL_WEIGHT); +		napi_hash_add(&bnx2x_fp(bp, i, napi)); +	} +} + +static void bnx2x_add_all_napi(struct bnx2x *bp) +{ +	int i; + +	/* Add NAPI objects */ +	for_each_eth_queue(bp, i) { +		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), +			       bnx2x_poll, NAPI_POLL_WEIGHT); +		napi_hash_add(&bnx2x_fp(bp, i, napi)); +	} +} + +static int bnx2x_calc_num_queues(struct bnx2x *bp) +{ +	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues(); + +	/* Reduce memory usage in kdump environment by using only one queue */ +	if (reset_devices) +		nq = 1; + +	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); +	return nq; +} +  /**   * bnx2x_move_fp - move content of the fastpath structure.   * @@ -145,7 +186,7 @@ static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)  	}  } -int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ +int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */  /* free skb in the packet ring at pos idx   * return idx of last bd freed @@ -160,6 +201,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,  	struct sk_buff *skb = tx_buf->skb;  	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;  	int nbd; +	u16 split_bd_len = 0;  	/* prefetch skb end pointer to speedup dev_kfree_skb() */  	prefetch(&skb->end); @@ -167,10 +209,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,  	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",  	   txdata->txq_index, idx, tx_buf, skb); -	/* unmap first bd */  	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; -	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), -			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);  	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;  #ifdef BNX2X_STOP_ON_ERROR @@ -188,12 +227,25 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,  	--nbd;  	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); -	/* ...and the TSO split header bd since they have no mapping */ +	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { +		/* Skip second parse bd... */ +		--nbd; +		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); +	} + +	/* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */  	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { +		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; +		split_bd_len = BD_UNMAP_LEN(tx_data_bd);  		--nbd;  		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));  	} +	/* unmap first bd */ +	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), +			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len, +			 DMA_TO_DEVICE); +  	/* now free frags */  	while (nbd > 0) { @@ -354,7 +406,7 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,   */  static u32 bnx2x_get_rxhash(const struct bnx2x *bp,  			    const struct eth_fast_path_rx_cqe *cqe, -			    bool *l4_rxhash) +			    enum pkt_hash_types *rxhash_type)  {  	/* Get Toeplitz hash from CQE */  	if ((bp->dev->features & NETIF_F_RXHASH) && @@ -362,11 +414,13 @@ static u32 bnx2x_get_rxhash(const struct bnx2x *bp,  		enum eth_rss_hash_type htype;  		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; -		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) || -			     (htype == TCP_IPV6_HASH_TYPE); +		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) || +				(htype == TCP_IPV6_HASH_TYPE)) ? +			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; +  		return le32_to_cpu(cqe->rss_hash_result);  	} -	*l4_rxhash = false; +	*rxhash_type = PKT_HASH_TYPE_NONE;  	return 0;  } @@ -420,7 +474,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,  	tpa_info->tpa_state = BNX2X_TPA_START;  	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);  	tpa_info->placement_offset = cqe->placement_offset; -	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash); +	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);  	if (fp->mode == TPA_MODE_GRO) {  		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);  		tpa_info->full_page = SGE_PAGES / gro_size * gro_size; @@ -681,6 +735,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,  		}  	}  #endif +	skb_record_rx_queue(skb, fp->rx_queue);  	napi_gro_receive(&fp->napi, skb);  } @@ -727,8 +782,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,  		skb_reserve(skb, pad + NET_SKB_PAD);  		skb_put(skb, len); -		skb->rxhash = tpa_info->rxhash; -		skb->l4_rxhash = tpa_info->l4_rxhash; +		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);  		skb->protocol = eth_type_trans(skb, bp->dev);  		skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -749,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,  		return;  	} -	bnx2x_frag_free(fp, new_data); +	if (new_data) +		bnx2x_frag_free(fp, new_data);  drop:  	/* drop the packet and keep the buffer in the bin */  	DP(NETIF_MSG_RX_STATUS, @@ -811,7 +866,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,  		skb->ip_summed = CHECKSUM_UNNECESSARY;  } -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) +static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)  {  	struct bnx2x *bp = fp->bp;  	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; @@ -824,6 +879,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)  	if (unlikely(bp->panic))  		return 0;  #endif +	if (budget <= 0) +		return rx_pkt;  	bd_cons = fp->rx_bd_cons;  	bd_prod = fp->rx_bd_prod; @@ -845,7 +902,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)  		enum eth_rx_cqe_type cqe_fp_type;  		u16 len, pad, queue;  		u8 *data; -		bool l4_rxhash; +		u32 rxhash; +		enum pkt_hash_types rxhash_type;  #ifdef BNX2X_STOP_ON_ERROR  		if (unlikely(bp->panic)) @@ -855,6 +913,18 @@ int bnx2x_rx_int(struct 
bnx2x_fastpath *fp, int budget)  		bd_prod = RX_BD(bd_prod);  		bd_cons = RX_BD(bd_cons); +		/* A rmb() is required to ensure that the CQE is not read +		 * before it is written by the adapter DMA.  PCI ordering +		 * rules will make sure the other fields are written before +		 * the marker at the end of struct eth_fast_path_rx_cqe +		 * but without rmb() a weakly ordered processor can process +		 * stale data.  Without the barrier TPA state-machine might +		 * enter inconsistent state and kernel stack might be +		 * provided with incorrect packet description - these lead +		 * to various kernel crashed. +		 */ +		rmb(); +  		cqe_fp_flags = cqe_fp->type_error_flags;  		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; @@ -986,8 +1056,8 @@ reuse_rx:  		skb->protocol = eth_type_trans(skb, bp->dev);  		/* Set Toeplitz hash for a none-LRO skb */ -		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash); -		skb->l4_rxhash = l4_rxhash; +		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type); +		skb_set_hash(skb, rxhash, rxhash_type);  		skb_checksum_none_assert(skb); @@ -1480,7 +1550,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)  	}  } -void bnx2x_free_skbs_cnic(struct bnx2x *bp) +static void bnx2x_free_skbs_cnic(struct bnx2x *bp)  {  	bnx2x_free_tx_skbs_cnic(bp);  	bnx2x_free_rx_skbs_cnic(bp); @@ -1593,36 +1663,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)  	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",  	   msix_vec); -	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec); - +	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], +				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);  	/*  	 * reconfigure number of tx/rx queues according to available  	 * MSI-X vectors  	 */ -	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) { -		/* how less vectors we will have? */ -		int diff = msix_vec - rc; - -		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); - -		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); - -		if (rc) { -			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); -			goto no_msix; -		} -		/* -		 * decrease number of queues by number of unallocated entries -		 */ -		bp->num_ethernet_queues -= diff; -		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; - -		BNX2X_DEV_INFO("New queue configuration set: %d\n", -			       bp->num_queues); -	} else if (rc > 0) { +	if (rc == -ENOSPC) {  		/* Get by with single vector */ -		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1); -		if (rc) { +		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1); +		if (rc < 0) {  			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",  				       rc);  			goto no_msix; @@ -1635,8 +1685,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)  		bp->num_ethernet_queues = 1;  		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;  	} else if (rc < 0) { -		BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc); +		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);  		goto no_msix; +	} else if (rc < msix_vec) { +		/* how less vectors we will have? 
*/ +		int diff = msix_vec - rc; + +		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc); + +		/* +		 * decrease number of queues by number of unallocated entries +		 */ +		bp->num_ethernet_queues -= diff; +		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues; + +		BNX2X_DEV_INFO("New queue configuration set: %d\n", +			       bp->num_queues);  	}  	bp->flags |= USING_MSIX_FLAG; @@ -1789,26 +1853,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)  {  	int i; -	local_bh_disable();  	for_each_rx_queue_cnic(bp, i) {  		napi_disable(&bnx2x_fp(bp, i, napi)); -		while (!bnx2x_fp_lock_napi(&bp->fp[i])) -			mdelay(1); +		while (!bnx2x_fp_ll_disable(&bp->fp[i])) +			usleep_range(1000, 2000);  	} -	local_bh_enable();  }  static void bnx2x_napi_disable(struct bnx2x *bp)  {  	int i; -	local_bh_disable();  	for_each_eth_queue(bp, i) {  		napi_disable(&bnx2x_fp(bp, i, napi)); -		while (!bnx2x_fp_lock_napi(&bp->fp[i])) -			mdelay(1); +		while (!bnx2x_fp_ll_disable(&bp->fp[i])) +			usleep_range(1000, 2000);  	} -	local_bh_enable();  }  void bnx2x_netif_start(struct bnx2x *bp) @@ -1831,7 +1891,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)  		bnx2x_napi_disable_cnic(bp);  } -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, +		       void *accel_priv, select_queue_fallback_t fallback)  {  	struct bnx2x *bp = netdev_priv(dev); @@ -1853,7 +1914,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)  	}  	/* select a non-FCoE queue */ -	return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); +	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);  }  void bnx2x_set_num_queues(struct bnx2x *bp) @@ -2192,8 +2253,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)  		sizeof(struct per_queue_stats) * num_queue_stats +  		sizeof(struct stats_counter); -	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, -			bp->fw_stats_data_sz + bp->fw_stats_req_sz); +	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, +				       bp->fw_stats_data_sz + bp->fw_stats_req_sz); +	if (!bp->fw_stats) +		goto alloc_mem_err;  	/* Set shortcuts */  	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; @@ -2262,7 +2325,7 @@ static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)   * virtualized environments a pf from another VM may have already   * initialized the device including loading FW   */ -int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code) +int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)  {  	/* is another pf loaded on this engine? */  	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && @@ -2281,8 +2344,12 @@ int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)  		/* abort nic load if version mismatch */  		if (my_fw != loaded_fw) { -			BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", -				  loaded_fw, my_fw); +			if (print_err) +				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. 
Aborting\n", +					  loaded_fw, my_fw); +			else +				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", +					       loaded_fw, my_fw);  			return -EBUSY;  		}  	} @@ -2295,16 +2362,16 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)  	int path = BP_PATH(bp);  	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n", -	   path, load_count[path][0], load_count[path][1], -	   load_count[path][2]); -	load_count[path][0]++; -	load_count[path][1 + port]++; +	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], +	   bnx2x_load_count[path][2]); +	bnx2x_load_count[path][0]++; +	bnx2x_load_count[path][1 + port]++;  	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n", -	   path, load_count[path][0], load_count[path][1], -	   load_count[path][2]); -	if (load_count[path][0] == 1) +	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], +	   bnx2x_load_count[path][2]); +	if (bnx2x_load_count[path][0] == 1)  		return FW_MSG_CODE_DRV_LOAD_COMMON; -	else if (load_count[path][1 + port] == 1) +	else if (bnx2x_load_count[path][1 + port] == 1)  		return FW_MSG_CODE_DRV_LOAD_PORT;  	else  		return FW_MSG_CODE_DRV_LOAD_FUNCTION; @@ -2544,10 +2611,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)  		}  	} -	/* Allocated memory for FW statistics  */ -	if (bnx2x_alloc_fw_stats_mem(bp)) -		LOAD_ERROR_EXIT(bp, load_error0); -  	/* need to be done after alloc mem, since it's self adjusting to amount  	 * of memory available for RSS queues  	 */ @@ -2557,6 +2620,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)  		LOAD_ERROR_EXIT(bp, load_error0);  	} +	/* Allocated memory for FW statistics  */ +	if (bnx2x_alloc_fw_stats_mem(bp)) +		LOAD_ERROR_EXIT(bp, load_error0); +  	/* request pf to initialize status blocks */  	if (IS_VF(bp)) {  		rc = bnx2x_vfpf_init(bp); @@ -2597,7 +2664,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)  				LOAD_ERROR_EXIT(bp, load_error1);  			/* what did mcp say? 
*/ -			rc = bnx2x_nic_load_analyze_req(bp, load_code); +			rc = bnx2x_compare_fw_ver(bp, load_code, true);  			if (rc) {  				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);  				LOAD_ERROR_EXIT(bp, load_error2); @@ -2733,7 +2800,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)  	case LOAD_OPEN:  		netif_tx_start_all_queues(bp->dev); -		smp_mb__after_clear_bit(); +		smp_mb__after_atomic();  		break;  	case LOAD_DIAG: @@ -2756,6 +2823,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)  	if (CNIC_ENABLED(bp))  		bnx2x_load_cnic(bp); +	if (IS_PF(bp)) +		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); +  	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {  		/* mark driver is loaded in shmem2 */  		u32 val; @@ -2811,8 +2881,8 @@ load_error1:  	if (IS_PF(bp))  		bnx2x_clear_pf_load(bp);  load_error0: -	bnx2x_free_fp_mem(bp);  	bnx2x_free_fw_stats_mem(bp); +	bnx2x_free_fp_mem(bp);  	bnx2x_free_mem(bp);  	return rc; @@ -2958,6 +3028,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)  	bp->port.pmf = 0; +	/* clear pending work in rtnl task */ +	bp->sp_rtnl_state = 0; +	smp_mb(); +  	/* Free SKBs, SGEs, TPA pool and driver internals */  	bnx2x_free_skbs(bp);  	if (CNIC_LOADED(bp)) @@ -2978,6 +3052,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)  	bp->state = BNX2X_STATE_CLOSED;  	bp->cnic_loaded = false; +	/* Clear driver version indication in shmem */ +	if (IS_PF(bp)) +		bnx2x_update_mng_version(bp); +  	/* Check if there are pending parity attentions. If there are - set  	 * RECOVERY_IN_PROGRESS.  	 */ @@ -3058,7 +3136,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)  /*   * net_device service functions   */ -int bnx2x_poll(struct napi_struct *napi, int budget) +static int bnx2x_poll(struct napi_struct *napi, int budget)  {  	int work_done = 0;  	u8 cos; @@ -3255,14 +3333,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)  	if (prot == IPPROTO_TCP)  		rc |= XMIT_CSUM_TCP; -	if (skb_is_gso_v6(skb)) { -		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); -		if (rc & XMIT_CSUM_ENC) -			rc |= XMIT_GSO_ENC_V6; -	} else if (skb_is_gso(skb)) { -		rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); -		if (rc & XMIT_CSUM_ENC) -			rc |= XMIT_GSO_ENC_V4; +	if (skb_is_gso(skb)) { +		if (skb_is_gso_v6(skb)) { +			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP); +			if (rc & XMIT_CSUM_ENC) +				rc |= XMIT_GSO_ENC_V6; +		} else { +			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP); +			if (rc & XMIT_CSUM_ENC) +				rc |= XMIT_GSO_ENC_V4; +		}  	}  	return rc; @@ -3815,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)  			/* set encapsulation flag in start BD */  			SET_FLAG(tx_start_bd->general_data,  				 ETH_TX_START_BD_TUNNEL_EXIST, 1); + +			tx_buf->flags |= BNX2X_HAS_SECOND_PBD; +  			nbd++;  		} else if (xmit_type & XMIT_CSUM) {  			/* Set PBD in checksum offload case w/o encapsulation */ @@ -3823,7 +3906,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)  						     xmit_type);  		} -		/* Add the macs to the parsing BD this is a vf */ +		/* Add the macs to the parsing BD if this is a vf or if +		 * Tx Switching is enabled. 
+		 */  		if (IS_VF(bp)) {  			/* override GRE parameters in BD */  			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, @@ -3835,6 +3920,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)  					      &pbd_e2->data.mac_addr.dst_mid,  					      &pbd_e2->data.mac_addr.dst_lo,  					      eth->h_dest); +		} else if (bp->flags & TX_SWITCHING) { +			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, +					      &pbd_e2->data.mac_addr.dst_mid, +					      &pbd_e2->data.mac_addr.dst_lo, +					      eth->h_dest);  		}  		SET_FLAG(pbd_e2_parsing_data, @@ -4183,7 +4273,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)  	/* end of fastpath */  } -void bnx2x_free_fp_mem_cnic(struct bnx2x *bp) +static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)  {  	int i;  	for_each_cnic_queue(bp, i) @@ -4311,14 +4401,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)  	if (!IS_FCOE_IDX(index)) {  		/* status blocks */ -		if (!CHIP_IS_E1x(bp)) -			BNX2X_PCI_ALLOC(sb->e2_sb, -				&bnx2x_fp(bp, index, status_blk_mapping), -				sizeof(struct host_hc_status_block_e2)); -		else -			BNX2X_PCI_ALLOC(sb->e1x_sb, -				&bnx2x_fp(bp, index, status_blk_mapping), -			    sizeof(struct host_hc_status_block_e1x)); +		if (!CHIP_IS_E1x(bp)) { +			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), +						    sizeof(struct host_hc_status_block_e2)); +			if (!sb->e2_sb) +				goto alloc_mem_err; +		} else { +			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping), +						     sizeof(struct host_hc_status_block_e1x)); +			if (!sb->e1x_sb) +				goto alloc_mem_err; +		}  	}  	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to @@ -4337,35 +4430,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)  			   "allocating tx memory of fp %d cos %d\n",  			   index, cos); -			BNX2X_ALLOC(txdata->tx_buf_ring, -				sizeof(struct sw_tx_bd) * NUM_TX_BD); -			BNX2X_PCI_ALLOC(txdata->tx_desc_ring, -				&txdata->tx_desc_mapping, -				sizeof(union eth_tx_bd_types) * NUM_TX_BD); +			txdata->tx_buf_ring = kcalloc(NUM_TX_BD, +						      sizeof(struct sw_tx_bd), +						      GFP_KERNEL); +			if (!txdata->tx_buf_ring) +				goto alloc_mem_err; +			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping, +							       sizeof(union eth_tx_bd_types) * NUM_TX_BD); +			if (!txdata->tx_desc_ring) +				goto alloc_mem_err;  		}  	}  	/* Rx */  	if (!skip_rx_queue(bp, index)) {  		/* fastpath rx rings: rx_buf rx_desc rx_comp */ -		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), -				sizeof(struct sw_rx_bd) * NUM_RX_BD); -		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), -				&bnx2x_fp(bp, index, rx_desc_mapping), -				sizeof(struct eth_rx_bd) * NUM_RX_BD); +		bnx2x_fp(bp, index, rx_buf_ring) = +			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL); +		if (!bnx2x_fp(bp, index, rx_buf_ring)) +			goto alloc_mem_err; +		bnx2x_fp(bp, index, rx_desc_ring) = +			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping), +					sizeof(struct eth_rx_bd) * NUM_RX_BD); +		if (!bnx2x_fp(bp, index, rx_desc_ring)) +			goto alloc_mem_err;  		/* Seed all CQEs by 1s */ -		BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), -				 &bnx2x_fp(bp, index, rx_comp_mapping), -				 sizeof(struct eth_fast_path_rx_cqe) * -				 NUM_RCQ_BD); +		bnx2x_fp(bp, index, rx_comp_ring) = +			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping), +					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD); +		if (!bnx2x_fp(bp, index, rx_comp_ring)) +			goto 
alloc_mem_err;  		/* SGE ring */ -		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), -				sizeof(struct sw_rx_page) * NUM_RX_SGE); -		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), -				&bnx2x_fp(bp, index, rx_sge_mapping), -				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); +		bnx2x_fp(bp, index, rx_page_ring) = +			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page), +				GFP_KERNEL); +		if (!bnx2x_fp(bp, index, rx_page_ring)) +			goto alloc_mem_err; +		bnx2x_fp(bp, index, rx_sge_ring) = +			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping), +					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); +		if (!bnx2x_fp(bp, index, rx_sge_ring)) +			goto alloc_mem_err;  		/* RX BD ring */  		bnx2x_set_next_page_rx_bd(fp); @@ -4397,7 +4504,7 @@ alloc_mem_err:  	return 0;  } -int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp) +static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)  {  	if (!NO_FCOE(bp))  		/* FCoE */ @@ -4410,7 +4517,7 @@ int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)  	return 0;  } -int bnx2x_alloc_fp_mem(struct bnx2x *bp) +static int bnx2x_alloc_fp_mem(struct bnx2x *bp)  {  	int i; @@ -4721,12 +4828,8 @@ void bnx2x_tx_timeout(struct net_device *dev)  		bnx2x_panic();  #endif -	smp_mb__before_clear_bit(); -	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state); -	smp_mb__after_clear_bit(); -  	/* This allows the netif to be shutdown gracefully before resetting */ -	schedule_delayed_work(&bp->sp_rtnl_task, 0); +	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);  }  int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) @@ -4854,3 +4957,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,  	disable = disable ? 1 : (usec ? 0 : 1);  	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);  } + +void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, +			    u32 verbose) +{ +	smp_mb__before_atomic(); +	set_bit(flag, &bp->sp_rtnl_state); +	smp_mb__after_atomic(); +	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", +	   flag); +	schedule_delayed_work(&bp->sp_rtnl_task, 0); +} +EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa7449..571427c7226 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   * UDP CSUM errata workaround by Arik Gendelman @@ -21,15 +21,14 @@  #include <linux/pci.h>  #include <linux/netdevice.h>  #include <linux/etherdevice.h> +#include <linux/irq.h>  #include "bnx2x.h"  #include "bnx2x_sriov.h"  /* This is used as a replacement for an MCP if it's not present */ -extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ - -extern int num_queues; -extern int int_mode; +extern int bnx2x_load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ +extern int bnx2x_num_queues;  /************************ Macros ********************************/  #define BNX2X_PCI_FREE(x, y, size) \ @@ -49,31 +48,26 @@ extern int int_mode;  		} \  	} while (0) -#define BNX2X_PCI_ALLOC(x, y, size) \ -	do { \ -		x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ -		if (x == NULL) \ -			goto alloc_mem_err; \ -		DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ -		   (unsigned long long)(*y), x); \ -	} while (0) - -#define BNX2X_PCI_FALLOC(x, y, size) \ -	do { \ -		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ -		if (x == NULL) \ -			goto alloc_mem_err; \ -		memset((void *)x, 0xFFFFFFFF, size); \ -		DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\ -		   (unsigned long long)(*y), x); \ -	} while (0) - -#define BNX2X_ALLOC(x, size) \ -	do { \ -		x = kzalloc(size, GFP_KERNEL); \ -		if (x == NULL) \ -			goto alloc_mem_err; \ -	} while (0) +#define BNX2X_PCI_ALLOC(y, size)					\ +({									\ +	void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ +	if (x)								\ +		DP(NETIF_MSG_HW,					\ +		   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",	\ +		   (unsigned long long)(*y), x);			\ +	x;								\ +}) +#define BNX2X_PCI_FALLOC(y, size)					\ +({									\ +	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ +	if (x) {							\ +		memset(x, 0xff, size);					\ +		DP(NETIF_MSG_HW,					\ +		   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",	\ +		   (unsigned long long)(*y), x);			\ +	}								\ +	x;								\ +})  /*********************** Interfaces ****************************   *  Functions that need to be implemented by each driver version @@ -417,35 +411,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);   * If bp->state is OPEN, should be called with   * netif_addr_lock_bh()   */ -void bnx2x_set_rx_mode(struct net_device *dev);  void bnx2x_set_rx_mode_inner(struct bnx2x *bp); -/** - * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW. - * - * @bp:		driver handle - * - * If bp->state is OPEN, should be called with - * netif_addr_lock_bh(). - */ -int bnx2x_set_storm_rx_mode(struct bnx2x *bp); - -/** - * bnx2x_set_q_rx_mode - configures rx_mode for a single queue. 
- * - * @bp:			driver handle - * @cl_id:		client id - * @rx_mode_flags:	rx mode configuration - * @rx_accept_flags:	rx accept configuration - * @tx_accept_flags:	tx accept configuration (tx switch) - * @ramrod_flags:	ramrod configuration - */ -int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, -			unsigned long rx_mode_flags, -			unsigned long rx_accept_flags, -			unsigned long tx_accept_flags, -			unsigned long ramrod_flags); -  /* Parity errors related */  void bnx2x_set_pf_load(struct bnx2x *bp);  bool bnx2x_clear_pf_load(struct bnx2x *bp); @@ -524,7 +491,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);  int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);  /* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, +		       void *accel_priv, select_queue_fallback_t fallback);  static inline void bnx2x_update_rx_prod(struct bnx2x *bp,  					struct bnx2x_fastpath *fp, @@ -564,9 +532,6 @@ int bnx2x_reload_if_running(struct net_device *dev);  int bnx2x_change_mac_addr(struct net_device *dev, void *p); -/* NAPI poll Rx part */ -int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget); -  /* NAPI poll Tx part */  int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); @@ -577,13 +542,9 @@ int bnx2x_resume(struct pci_dev *pdev);  /* Release IRQ vectors */  void bnx2x_free_irq(struct bnx2x *bp); -void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);  void bnx2x_free_fp_mem(struct bnx2x *bp); -int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp); -int bnx2x_alloc_fp_mem(struct bnx2x *bp);  void bnx2x_init_rx_rings(struct bnx2x *bp);  void bnx2x_init_rx_rings_cnic(struct bnx2x *bp); -void bnx2x_free_skbs_cnic(struct bnx2x *bp);  void bnx2x_free_skbs(struct bnx2x *bp);  void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);  void bnx2x_netif_start(struct bnx2x *bp); @@ -607,15 +568,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);  int bnx2x_enable_msi(struct bnx2x *bp);  /** - * bnx2x_poll - NAPI callback - * - * @napi:	napi structure - * @budget: - * - */ -int bnx2x_poll(struct napi_struct *napi, int budget); - -/**   * bnx2x_low_latency_recv - LL callback   *   * @napi:	napi structure @@ -861,30 +813,6 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,  	sge->addr_lo = 0;  } -static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp) -{ -	int i; - -	/* Add NAPI objects */ -	for_each_rx_queue_cnic(bp, i) { -		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), -			       bnx2x_poll, NAPI_POLL_WEIGHT); -		napi_hash_add(&bnx2x_fp(bp, i, napi)); -	} -} - -static inline void bnx2x_add_all_napi(struct bnx2x *bp) -{ -	int i; - -	/* Add NAPI objects */ -	for_each_eth_queue(bp, i) { -		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), -			       bnx2x_poll, NAPI_POLL_WEIGHT); -		napi_hash_add(&bnx2x_fp(bp, i, napi)); -	} -} -  static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)  {  	int i; @@ -918,14 +846,6 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)  	}  } -static inline int bnx2x_calc_num_queues(struct bnx2x *bp) -{ -	return  num_queues ? 
-		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : -		 min_t(int, netif_get_num_default_rss_queues(), -		       BNX2X_MAX_QUEUES(bp)); -} -  static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)  {  	int i, j; @@ -1012,7 +932,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)  	else /* CHIP_IS_E1X */  		start_params->network_cos_mode = FW_WRR; -	start_params->gre_tunnel_mode = IPGRE_TUNNEL; +	start_params->gre_tunnel_mode = L2GRE_TUNNEL;  	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;  	return bnx2x_func_state_change(bp, &func_params); @@ -1172,8 +1092,6 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)  		return fp->cl_id;  } -u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); -  static inline void bnx2x_init_txdata(struct bnx2x *bp,  				     struct bnx2x_fp_txdata *txdata, u32 cid,  				     int txq_index, __le16 *tx_cons_sb, @@ -1206,47 +1124,6 @@ static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)  	return bp->igu_base_sb;  } -static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) -{ -	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); -	unsigned long q_type = 0; - -	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); -	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, -						     BNX2X_FCOE_ETH_CL_ID_IDX); -	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); -	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; -	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; -	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; -	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), -			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, -			  fp); - -	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); - -	/* qZone id equals to FW (per path) client id */ -	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); -	/* init shortcut */ -	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = -		bnx2x_rx_ustorm_prods_offset(fp); - -	/* Configure Queue State object */ -	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); -	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); - -	/* No multi-CoS for FCoE L2 client */ -	BUG_ON(fp->max_cos != 1); - -	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, -			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), -			     bnx2x_sp_mapping(bp, q_rdata), q_type); - -	DP(NETIF_MSG_IFUP, -	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", -	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, -	   fp->igu_sb_id); -} -  static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,  				       struct bnx2x_fp_txdata *txdata)  { @@ -1443,4 +1320,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);  int bnx2x_drain_tx_queues(struct bnx2x *bp);  void bnx2x_squeeze_objects(struct bnx2x *bp); +void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag, +			    u32 verbose); +  #endif /* BNX2X_CMN_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fcf2761d882..51a952c51cb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -12,7 +12,7 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Dmitry Kravkov   *   */ @@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)  	 * as we are handling an attention on a work queue which must be  	 * flushed at some rtnl-locked contexts (e.g. if down)  	 */ -	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) -		schedule_delayed_work(&bp->sp_rtnl_task, 0); +	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);  }  void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) @@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)  			if (IS_MF(bp))  				bnx2x_link_sync_notify(bp); -			set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state); - -			schedule_delayed_work(&bp->sp_rtnl_task, 0); - +			bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);  			return;  		}  	case BNX2X_DCBX_STATE_TX_PAUSED: @@ -778,11 +774,6 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)  		/* ets may affect cmng configuration: reinit it in hw */  		bnx2x_set_local_cmng(bp); - -		set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state); - -		schedule_delayed_work(&bp->sp_rtnl_task, 0); -  		return;  	case BNX2X_DCBX_STATE_TX_RELEASED:  		DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h index 804b8f64463..c6939ecb02c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h @@ -12,7 +12,7 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Dmitry Kravkov   *   */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 324de5f0533..25eddd90f48 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   * UDP CSUM errata workaround by Arik Gendelman @@ -358,49 +358,48 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)  	cfg_idx = bnx2x_get_link_cfg_idx(bp);  	old_multi_phy_config = bp->link_params.multi_phy_config; -	switch (cmd->port) { -	case PORT_TP: -		if (bp->port.supported[cfg_idx] & SUPPORTED_TP) -			break; /* no port change */ - -		if (!(bp->port.supported[0] & SUPPORTED_TP || -		      bp->port.supported[1] & SUPPORTED_TP)) { -			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); -			return -EINVAL; -		} -		bp->link_params.multi_phy_config &= -			~PORT_HW_CFG_PHY_SELECTION_MASK; -		if (bp->link_params.multi_phy_config & -		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) -			bp->link_params.multi_phy_config |= -			PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; -		else -			bp->link_params.multi_phy_config |= -			PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; -		break; -	case PORT_FIBRE: -	case PORT_DA: -		if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE) -			break; /* no port change */ - -		if (!(bp->port.supported[0] & SUPPORTED_FIBRE || -		      bp->port.supported[1] & SUPPORTED_FIBRE)) { +	if (cmd->port != bnx2x_get_port_type(bp)) { +		switch (cmd->port) { +		case PORT_TP: +			if (!(bp->port.supported[0] & SUPPORTED_TP || +			      bp->port.supported[1] & SUPPORTED_TP)) { +				DP(BNX2X_MSG_ETHTOOL, +				   "Unsupported port type\n"); +				return -EINVAL; +			} +			bp->link_params.multi_phy_config &= +				~PORT_HW_CFG_PHY_SELECTION_MASK; +			if (bp->link_params.multi_phy_config & +			    PORT_HW_CFG_PHY_SWAPPED_ENABLED) +				bp->link_params.multi_phy_config |= +				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; +			else +				bp->link_params.multi_phy_config |= +				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; +			break; +		case PORT_FIBRE: +		case PORT_DA: +		case PORT_NONE: +			if (!(bp->port.supported[0] & SUPPORTED_FIBRE || +			      bp->port.supported[1] & SUPPORTED_FIBRE)) { +				DP(BNX2X_MSG_ETHTOOL, +				   "Unsupported port type\n"); +				return -EINVAL; +			} +			bp->link_params.multi_phy_config &= +				~PORT_HW_CFG_PHY_SELECTION_MASK; +			if (bp->link_params.multi_phy_config & +			    PORT_HW_CFG_PHY_SWAPPED_ENABLED) +				bp->link_params.multi_phy_config |= +				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; +			else +				bp->link_params.multi_phy_config |= +				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; +			break; +		default:  			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");  			return -EINVAL;  		} -		bp->link_params.multi_phy_config &= -			~PORT_HW_CFG_PHY_SELECTION_MASK; -		if (bp->link_params.multi_phy_config & -		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) -			bp->link_params.multi_phy_config |= -			PORT_HW_CFG_PHY_SELECTION_FIRST_PHY; -		else -			bp->link_params.multi_phy_config |= -			PORT_HW_CFG_PHY_SELECTION_SECOND_PHY; -		break; -	default: -		DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n"); -		return -EINVAL;  	}  	/* Save new config in case command complete successfully */  	new_multi_phy_config = bp->link_params.multi_phy_config; @@ -639,6 +638,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)  	struct bnx2x *bp = netdev_priv(dev);  	int regdump_len = 0; +	if (IS_VF(bp)) +		return 0; +  	regdump_len = __bnx2x_get_regs_len(bp);  	regdump_len *= 4;  	regdump_len += sizeof(struct dump_header); @@ -891,17 +893,8 @@ static void bnx2x_get_regs(struct net_device *dev,  	 * will re-enable parity 
attentions right after the dump.  	 */ -	/* Disable parity on path 0 */ -	bnx2x_pretend_func(bp, 0);  	bnx2x_disable_blocks_parity(bp); -	/* Disable parity on path 1 */ -	bnx2x_pretend_func(bp, 1); -	bnx2x_disable_blocks_parity(bp); - -	/* Return to current function */ -	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); -  	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;  	dump_hdr.preset = DUMP_ALL_PRESETS;  	dump_hdr.version = BNX2X_DUMP_VERSION; @@ -928,18 +921,9 @@ static void bnx2x_get_regs(struct net_device *dev,  	/* Actually read the registers */  	__bnx2x_get_regs(bp, p); -	/* Re-enable parity attentions on path 0 */ -	bnx2x_pretend_func(bp, 0); -	bnx2x_clear_blocks_parity(bp); -	bnx2x_enable_blocks_parity(bp); - -	/* Re-enable parity attentions on path 1 */ -	bnx2x_pretend_func(bp, 1); +	/* Re-enable parity attentions */  	bnx2x_clear_blocks_parity(bp);  	bnx2x_enable_blocks_parity(bp); - -	/* Return to current function */ -	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));  }  static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset) @@ -993,17 +977,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,  	 * will re-enable parity attentions right after the dump.  	 */ -	/* Disable parity on path 0 */ -	bnx2x_pretend_func(bp, 0);  	bnx2x_disable_blocks_parity(bp); -	/* Disable parity on path 1 */ -	bnx2x_pretend_func(bp, 1); -	bnx2x_disable_blocks_parity(bp); - -	/* Return to current function */ -	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); -  	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;  	dump_hdr.preset = bp->dump_preset_idx;  	dump_hdr.version = BNX2X_DUMP_VERSION; @@ -1032,19 +1007,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,  	/* Actually read the registers */  	__bnx2x_get_preset_regs(bp, p, dump_hdr.preset); -	/* Re-enable parity attentions on path 0 */ -	bnx2x_pretend_func(bp, 0); -	bnx2x_clear_blocks_parity(bp); -	bnx2x_enable_blocks_parity(bp); - -	/* Re-enable parity attentions on path 1 */ -	bnx2x_pretend_func(bp, 1); +	/* Re-enable parity attentions */  	bnx2x_clear_blocks_parity(bp);  	bnx2x_enable_blocks_parity(bp); -	/* Return to current function */ -	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); -  	return 0;  } @@ -1672,6 +1638,12 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,  		memcpy(&val, data_buf, 4); +		/* Notice unlike bnx2x_nvram_read_dword() this will not +		 * change val using be32_to_cpu(), which causes data to flip +		 * if the eeprom is read and then written back. This is due +		 * to tools utilizing this functionality that would break +		 * if this would be resolved. 
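
To make the byte-order note just above concrete, here is a minimal userspace sketch; it is not driver code, htonl()/ntohl() merely stand in for cpu_to_be32()/be32_to_cpu(), and the dword value is invented. It shows how a word that is swapped on read but stored back unswapped ends up flipped on a little-endian host, which is the round-trip behaviour the comment says existing tools have come to rely on.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl() as stand-ins for the kernel byte swaps */

int main(void)
{
	uint32_t nvram_word = 0x11223344;	/* dword as it sits in NVRAM */
	uint32_t as_read = ntohl(nvram_word);	/* read path applies be32_to_cpu() */
	uint32_t written_back = as_read;	/* write path stores it back unswapped */

	printf("nvram 0x%08x -> read 0x%08x -> written back 0x%08x\n",
	       nvram_word, as_read, written_back);
	/* On a little-endian host the written value differs from the original,
	 * i.e. the "flip" described in the comment above. */
	return 0;
}
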
+		 */  		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);  		/* advance to the next dword */ @@ -2900,9 +2872,16 @@ static void bnx2x_self_test(struct net_device *dev,  	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); +	if (bnx2x_test_nvram(bp) != 0) { +		if (!IS_MF(bp)) +			buf[4] = 1; +		else +			buf[0] = 1; +		etest->flags |= ETH_TEST_FL_FAILED; +	} +  	if (!netif_running(dev)) { -		DP(BNX2X_MSG_ETHTOOL, -		   "Can't perform self-test when interface is down\n"); +		DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");  		return;  	} @@ -2964,13 +2943,7 @@ static void bnx2x_self_test(struct net_device *dev,  		/* wait until link state is restored */  		bnx2x_wait_for_link(bp, link_up, is_serdes);  	} -	if (bnx2x_test_nvram(bp) != 0) { -		if (!IS_MF(bp)) -			buf[4] = 1; -		else -			buf[0] = 1; -		etest->flags |= ETH_TEST_FL_FAILED; -	} +  	if (bnx2x_test_intr(bp) != 0) {  		if (!IS_MF(bp))  			buf[5] = 1; @@ -2997,8 +2970,9 @@ static void bnx2x_self_test(struct net_device *dev,  #define IS_PORT_STAT(i) \  	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)  #define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) -#define IS_MF_MODE_STAT(bp) \ -			(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) +#define HIDE_PORT_STAT(bp) \ +		((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \ +		 IS_VF(bp))  /* ethtool statistics are displayed for all regular ethernet queues and the   * fcoe L2 queue if not disabled @@ -3020,7 +2994,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)  				      BNX2X_NUM_Q_STATS;  		} else  			num_strings = 0; -		if (IS_MF_MODE_STAT(bp)) { +		if (HIDE_PORT_STAT(bp)) {  			for (i = 0; i < BNX2X_NUM_STATS; i++)  				if (IS_FUNC_STAT(i))  					num_strings++; @@ -3075,7 +3049,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)  		}  		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { -			if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) +			if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))  				continue;  			strcpy(buf + (k + j)*ETH_GSTRING_LEN,  				   bnx2x_stats_arr[i].string); @@ -3133,7 +3107,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,  	hw_stats = (u32 *)&bp->eth_stats;  	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { -		if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i)) +		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))  			continue;  		if (bnx2x_stats_arr[i].size == 0) {  			/* skip this counter */ @@ -3343,7 +3317,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)  	return T_ETH_INDIRECTION_TABLE_SIZE;  } -static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) +static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)  {  	struct bnx2x *bp = netdev_priv(dev);  	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; @@ -3367,14 +3341,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)  	return 0;  } -static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) +static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir, +			  const u8 *key)  {  	struct bnx2x *bp = netdev_priv(dev);  	size_t i;  	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {  		/* -		 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() +		 * The same as in bnx2x_get_rxfh: we can't use a memcpy()  		 * as an internal storage of an indirection table is a u8 array  		 * while indir->ring_index points to an array of u32.  		 
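
The element-by-element copy that bnx2x_get_rxfh()/bnx2x_set_rxfh() keep above (instead of a memcpy) is the usual width-widening loop. A standalone sketch of the same idea follows; the table size and contents are made up, and the point is that a memcpy() between a u8 table and a u32 indirection array would pack four entries into each u32 slot rather than one entry per slot.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define TBL_SIZE 8	/* stands in for T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
	uint8_t ind_table[TBL_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };	/* internal u8 table */
	uint32_t indir[TBL_SIZE];					/* ethtool's u32 view */
	size_t i;

	/* one entry per u32 slot; a memcpy() would pack four u8 entries
	 * into each u32 instead */
	for (i = 0; i < TBL_SIZE; i++)
		indir[i] = ind_table[i];

	for (i = 0; i < TBL_SIZE; i++)
		printf("%" PRIu32 " ", indir[i]);
	printf("\n");
	return 0;
}
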
* @@ -3498,8 +3473,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {  	.get_rxnfc		= bnx2x_get_rxnfc,  	.set_rxnfc		= bnx2x_set_rxnfc,  	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size, -	.get_rxfh_indir		= bnx2x_get_rxfh_indir, -	.set_rxfh_indir		= bnx2x_set_rxfh_indir, +	.get_rxfh		= bnx2x_get_rxfh, +	.set_rxfh		= bnx2x_set_rxfh,  	.get_channels		= bnx2x_get_channels,  	.set_channels		= bnx2x_set_channels,  	.get_module_info	= bnx2x_get_module_info, @@ -3525,16 +3500,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {  	.get_rxnfc		= bnx2x_get_rxnfc,  	.set_rxnfc		= bnx2x_set_rxnfc,  	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size, -	.get_rxfh_indir		= bnx2x_get_rxfh_indir, -	.set_rxfh_indir		= bnx2x_set_rxfh_indir, +	.get_rxfh		= bnx2x_get_rxfh, +	.set_rxfh		= bnx2x_set_rxfh,  	.get_channels		= bnx2x_get_channels,  	.set_channels		= bnx2x_set_channels,  };  void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)  { -	if (IS_PF(bp)) -		SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops); -	else /* vf */ -		SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops); +	netdev->ethtool_ops = (IS_PF(bp)) ? +		&bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;  } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 84aecdf06f7..95dc3654354 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -87,7 +87,6 @@  	(IRO[156].base + ((vfId) * IRO[156].m1))  #define CSTORM_VF_TO_PF_OFFSET(funcId) \  	(IRO[150].base + ((funcId) * IRO[150].m1)) -#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)  #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \  	(IRO[203].base + ((pfId) * IRO[203].m1))  #define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h index f572ae164fc..8aafd9b5d6a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h @@ -6,8 +6,8 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Vladislav Zolotarov <vladz@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Vladislav Zolotarov   * Based on the original idea of John Wright <john.wright@hp.com>.   */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 32767f6aa33..5ba8af50c84 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -172,6 +172,7 @@ struct shared_hw_cfg {			 /* NVRAM Offset */  		#define SHARED_HW_CFG_LED_MAC4                       0x000c0000  		#define SHARED_HW_CFG_LED_PHY8                       0x000d0000  		#define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000 +		#define SHARED_HW_CFG_LED_EXTPHY2                    0x000f0000  	#define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000 @@ -2002,6 +2003,23 @@ struct shmem_lfa {  	#define SHMEM_LFA_DONT_CLEAR_STAT		(1<<24)  }; +/* Used to support NSCI get OS driver version + * on driver load the version value will be set + * on driver unload driver value of 0x0 will be set. 
+ */ +struct os_drv_ver { +#define DRV_VER_NOT_LOADED			0 + +	/* personalties order is important */ +#define DRV_PERS_ETHERNET			0 +#define DRV_PERS_ISCSI				1 +#define DRV_PERS_FCOE				2 + +	/* shmem2 struct is constant can't add more personalties here */ +#define MAX_DRV_PERS				3 +	u32 versions[MAX_DRV_PERS]; +}; +  struct ncsi_oem_fcoe_features {  	u32 fcoe_features1;  	#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF @@ -2216,6 +2234,18 @@ struct shmem2_region {  	u32 reserved4;				/* Offset 0x150 */  	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */  	#define LINK_ATTR_SYNC_KR2_ENABLE	(1<<0) + +	u32 reserved5[2]; +	u32 reserved6[PORT_MAX]; + +	/* driver version for each personality */ +	struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */ + +	/* Flag to the driver that PF's drv_info_host_addr buffer was read  */ +	u32 mfw_drv_indication; + +	/* We use indication for each PF (0..3) */ +#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))  }; @@ -2847,7 +2877,7 @@ struct afex_stats {  #define BCM_5710_FW_MAJOR_VERSION			7  #define BCM_5710_FW_MINOR_VERSION			8 -#define BCM_5710_FW_REVISION_VERSION		17 +#define BCM_5710_FW_REVISION_VERSION		19  #define BCM_5710_FW_ENGINEERING_VERSION		0  #define BCM_5710_FW_COMPILE_FLAGS			1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index 76df015f486..bd90e50bd8e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -7,9 +7,9 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir - * Modified by: Vladislav Zolotarov <vladz@broadcom.com> + * Modified by: Vladislav Zolotarov   */  #ifndef BNX2X_INIT_H @@ -640,23 +640,35 @@ static const struct {   * [30] MCP Latched ump_tx_parity   * [31] MCP Latched scpad_parity   */ -#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\ +#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS	\  	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \  	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ -	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ +	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\ +	(MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \  	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)  /* Below registers control the MCP parity attention output. When   * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are   * enabled, when cleared - disabled.   
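
The hunk that follows replaces the flat mcp_attn_ctl_regs[] address list with an addr/bits table, so each AEU enable register carries its own parity mask (only the function AEU entries keep the latched SCPAD bit). Below is a userspace sketch of that table-driven read-modify-write pattern; the register indices and mask values are invented, and a plain array stands in for REG_RD()/REG_WR().

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t regs[6];			/* stand-in register file */

#define PRTY_SUB_BITS	0x00000070u			/* ROM/UMP_RX/UMP_TX (invented values) */
#define PRTY_ALL_BITS	(PRTY_SUB_BITS | 0x80000000u)	/* + latched SCPAD */

static const struct {
	unsigned int addr;
	uint32_t bits;
} attn_ctl[] = {
	{ 0, PRTY_ALL_BITS }, { 1, PRTY_SUB_BITS }, { 2, PRTY_SUB_BITS },
	{ 3, PRTY_ALL_BITS }, { 4, PRTY_SUB_BITS }, { 5, PRTY_SUB_BITS },
};

static void set_mcp_parity(int enable)
{
	size_t i;

	for (i = 0; i < sizeof(attn_ctl) / sizeof(attn_ctl[0]); i++) {
		uint32_t val = regs[attn_ctl[i].addr];		/* REG_RD */

		if (enable)
			val |= attn_ctl[i].bits;
		else
			val &= ~attn_ctl[i].bits;

		regs[attn_ctl[i].addr] = val;			/* REG_WR */
	}
}

int main(void)
{
	set_mcp_parity(1);
	printf("enabled:  reg0=0x%08" PRIx32 " reg1=0x%08" PRIx32 "\n", regs[0], regs[1]);
	set_mcp_parity(0);
	printf("disabled: reg0=0x%08" PRIx32 " reg1=0x%08" PRIx32 "\n", regs[0], regs[1]);
	return 0;
}

Keeping the mask next to the address is what lets the NIG/PXP entries take the reduced mask while the per-function entries keep the full one, as the new table in the hunk shows.
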
*/ -static const u32 mcp_attn_ctl_regs[] = { -	MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, -	MISC_REG_AEU_ENABLE4_NIG_0, -	MISC_REG_AEU_ENABLE4_PXP_0, -	MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, -	MISC_REG_AEU_ENABLE4_NIG_1, -	MISC_REG_AEU_ENABLE4_PXP_1 +static const struct { +	u32 addr; +	u32 bits; +} mcp_attn_ctl_regs[] = { +	{ MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, +		MISC_AEU_ENABLE_MCP_PRTY_BITS }, +	{ MISC_REG_AEU_ENABLE4_NIG_0, +		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, +	{ MISC_REG_AEU_ENABLE4_PXP_0, +		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, +	{ MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, +		MISC_AEU_ENABLE_MCP_PRTY_BITS }, +	{ MISC_REG_AEU_ENABLE4_NIG_1, +		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }, +	{ MISC_REG_AEU_ENABLE4_PXP_1, +		MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }  };  static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) @@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)  	u32 reg_val;  	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { -		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); +		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);  		if (enable) -			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; +			reg_val |= mcp_attn_ctl_regs[i].bits;  		else -			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; +			reg_val &= ~mcp_attn_ctl_regs[i].bits; -		REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); +		REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);  	}  } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h index 8ab0dd90096..5669ed2e87d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h @@ -8,8 +8,8 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Vladislav Zolotarov <vladz@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Vladislav Zolotarov   */  #ifndef BNX2X_INIT_OPS_H diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 51468227bf3..53fb4fa61b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -205,6 +205,11 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,  		(_bank + (_addr & 0xf)), \  		_val) +static int bnx2x_check_half_open_conn(struct link_params *params, +				      struct link_vars *vars, u8 notify); +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, +				      struct link_params *params); +  static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)  {  	u32 val = REG_RD(bp, reg); @@ -1399,57 +1404,6 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,  	udelay(30);  } - -static void bnx2x_emac_get_pfc_stat(struct link_params *params, -				    u32 pfc_frames_sent[2], -				    u32 pfc_frames_received[2]) -{ -	/* Read pfc statistic */ -	struct bnx2x *bp = params->bp; -	u32 emac_base = params->port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; -	u32 val_xon = 0; -	u32 val_xoff = 0; - -	DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n"); - -	/* PFC received frames */ -	val_xoff = REG_RD(bp, emac_base + -				EMAC_REG_RX_PFC_STATS_XOFF_RCVD); -	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT; -	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD); -	val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT; - -	pfc_frames_received[0] = val_xon + val_xoff; - -	/* PFC received sent */ -	val_xoff = REG_RD(bp, emac_base + -				EMAC_REG_RX_PFC_STATS_XOFF_SENT); -	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT; -	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT); -	val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT; - -	pfc_frames_sent[0] = val_xon + val_xoff; -} - -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, -			 u32 pfc_frames_sent[2], -			 u32 pfc_frames_received[2]) -{ -	/* Read pfc statistic */ -	struct bnx2x *bp = params->bp; - -	DP(NETIF_MSG_LINK, "pfc statistic\n"); - -	if (!vars->link_up) -		return; - -	if (vars->mac_type == MAC_TYPE_EMAC) { -		DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n"); -		bnx2x_emac_get_pfc_stat(params, pfc_frames_sent, -					pfc_frames_received); -	} -}  /******************************************************************/  /*			MAC/PBF section				  */  /******************************************************************/ @@ -2264,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,  	 */  	u32 val;  	struct bnx2x *bp = params->bp; -	int bnx2x_status = 0;  	u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);  	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) @@ -2278,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,  	bnx2x_update_pfc_nig(params, vars, pfc_params);  	if (!vars->link_up) -		return bnx2x_status; +		return 0;  	DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); @@ -2292,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,  		    == 0) {  			DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");  			bnx2x_emac_enable(params, vars, 0); -			return bnx2x_status; +			return 0;  		}  		if (CHIP_IS_E2(bp))  			bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); @@ -2306,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,  			val = 1;  		REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);  	} -	return bnx2x_status; +	return 0;  }  static int bnx2x_bmac1_enable(struct link_params *params, @@ -3122,7 +3075,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)  }  static int bnx2x_bsc_read(struct link_params *params, -			  struct bnx2x_phy *phy, +			  struct bnx2x *bp,  			  u8 sl_devid,  			  u16 sl_addr,  			  u8 lc_addr, @@ -3131,7 +3084,6 @@ static int bnx2x_bsc_read(struct link_params *params,  {  	u32 val, i;  	int rc = 0; -	struct bnx2x *bp = params->bp;  	if (xfer_cnt > 16) {  		DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. 
Max is 16 bytes\n", @@ -3750,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,  static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,  					struct link_params *params,  					struct link_vars *vars) { -	u16 lane, i, cl72_ctrl, an_adv = 0; +	u16 lane, i, cl72_ctrl, an_adv = 0, val; +	u32 wc_lane_config;  	struct bnx2x *bp = params->bp;  	static struct bnx2x_reg_set reg_set[] = {  		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, @@ -3866,6 +3819,31 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,  		bnx2x_warpcore_enable_AN_KR2(phy, params, vars);  	} else { +		/* Enable Auto-Detect to support 1G over CL37 as well */ +		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, +				 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10); +		wc_lane_config = REG_RD(bp, params->shmem_base + +					offsetof(struct shmem_region, dev_info. +					shared_hw_config.wc_lane_config)); +		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, +				MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val); +		/* Force cl48 sync_status LOW to avoid getting stuck in CL73 +		 * parallel-detect loop when CL73 and CL37 are enabled. +		 */ +		val |= 1 << 11; + +		/* Restore Polarity settings in case it was run over by +		 * previous link owner +		 */ +		if (wc_lane_config & +		    (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane)) +			val |= 3 << 2; +		else +			val &= ~(3 << 2); +		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, +				 MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), +				 val); +  		bnx2x_disable_kr2(params, vars, phy);  	} @@ -6371,9 +6349,15 @@ int bnx2x_set_led(struct link_params *params,  			 * intended override.  			 */  			break; -		} else +		} else { +			u32 nig_led_mode = ((params->hw_led_mode << +					     SHARED_HW_CFG_LED_MODE_SHIFT) == +					    SHARED_HW_CFG_LED_EXTPHY2) ? +				(SHARED_HW_CFG_LED_PHY1 >> +				 SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;  			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, -			       hw_led_mode); +			       nig_led_mode); +		}  		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);  		/* Set blinking rate to ~15.9Hz */ @@ -6501,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,  static int bnx2x_link_initialize(struct link_params *params,  				 struct link_vars *vars)  { -	int rc = 0;  	u8 phy_index, non_ext_phy;  	struct bnx2x *bp = params->bp;  	/* In case of external phy existence, the line speed would be the @@ -6574,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,  			NIG_STATUS_XGXS0_LINK_STATUS |  			NIG_STATUS_SERDES0_LINK_STATUS |  			NIG_MASK_MI_INT)); -	return rc; +	return 0;  }  static void bnx2x_int_link_reset(struct bnx2x_phy *phy, @@ -7917,7 +7900,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,  			usleep_range(1000, 2000);  			bnx2x_warpcore_power_module(params, 1);  		} -		rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt, +		rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,  				    data_array);  	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); @@ -8115,17 +8098,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,  				*edc_mode = EDC_MODE_ACTIVE_DAC;  			else  				check_limiting_mode = 1; -		} else if (copper_module_type & -			SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { +		} else { +			*edc_mode = EDC_MODE_PASSIVE_DAC; +			/* Even in case PASSIVE_DAC indication is not set, +			 * treat it as a passive DAC cable, since some cables +			 * don't have this indication. 
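
A trimmed sketch of the copper-cable fallback this hunk introduces (it continues just below): instead of failing with -EINVAL when a module's tech bitmask carries neither the active nor the passive indication, treat it as a passive DAC cable. The bit values, the enum, and the classify_copper() helper are all invented for the demo, and in the real code this branch is reached only after the active/limiting-mode checks.

#include <stdio.h>
#include <stdint.h>

#define COPPER_ACTIVE	0x08	/* placeholder bit values for the demo */
#define COPPER_PASSIVE	0x04

enum edc_mode { EDC_ACTIVE_DAC, EDC_PASSIVE_DAC };

static enum edc_mode classify_copper(uint8_t tech_bitmask)
{
	if (tech_bitmask & COPPER_ACTIVE)
		return EDC_ACTIVE_DAC;

	/* Passive DAC is now the default: some cables omit the bit. */
	if (tech_bitmask & COPPER_PASSIVE)
		printf("Passive Copper cable detected\n");
	else
		printf("Unknown copper-cable-type, treating as passive DAC\n");

	return EDC_PASSIVE_DAC;
}

int main(void)
{
	classify_copper(COPPER_PASSIVE);
	classify_copper(0);	/* no indication at all: still passive DAC */
	return 0;
}
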
+			 */ +			if (copper_module_type & +			    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {  				DP(NETIF_MSG_LINK,  				   "Passive Copper cable detected\n"); -				*edc_mode = -				      EDC_MODE_PASSIVE_DAC; -		} else { -			DP(NETIF_MSG_LINK, -			   "Unknown copper-cable-type 0x%x !!!\n", -			   copper_module_type); -			return -EINVAL; +			} else { +				DP(NETIF_MSG_LINK, +				   "Unknown copper-cable-type\n"); +			}  		}  		break;  	} @@ -8627,8 +8613,8 @@ static void bnx2x_set_limiting_mode(struct link_params *params,  	}  } -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, -			       struct link_params *params) +static int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, +				      struct link_params *params)  {  	struct bnx2x *bp = params->bp;  	u16 edc_mode; @@ -10653,10 +10639,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,  					 0x40);  		} else { +			/* EXTPHY2 LED mode indicate that the 100M/1G/10G LED +			 * sources are all wired through LED1, rather than only +			 * 10G in other modes. +			 */ +			val = ((params->hw_led_mode << +				SHARED_HW_CFG_LED_MODE_SHIFT) == +			       SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80; +  			bnx2x_cl45_write(bp, phy,  					 MDIO_PMA_DEVAD,  					 MDIO_PMA_REG_8481_LED1_MASK, -					 0x80); +					 val);  			/* Tell LED3 to blink on source */  			bnx2x_cl45_read(bp, phy, @@ -10812,9 +10806,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,  			   (1<<11));  	if (((phy->req_line_speed == SPEED_AUTO_NEG) && -			(phy->speed_cap_mask & -			PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || -			(phy->req_line_speed == SPEED_1000)) { +	     (phy->speed_cap_mask & +	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || +	    (phy->req_line_speed == SPEED_1000)) {  		an_1000_val |= (1<<8);  		autoneg_val |= (1<<9 | 1<<12);  		if (phy->req_duplex == DUPLEX_FULL) @@ -10830,30 +10824,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,  			0x09,  			&an_1000_val); -	/* Set 100 speed advertisement */ -	if (((phy->req_line_speed == SPEED_AUTO_NEG) && -			(phy->speed_cap_mask & -			(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | -			PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { -		an_10_100_val |= (1<<7); -		/* Enable autoneg and restart autoneg for legacy speeds */ -		autoneg_val |= (1<<9 | 1<<12); - -		if (phy->req_duplex == DUPLEX_FULL) -			an_10_100_val |= (1<<8); -		DP(NETIF_MSG_LINK, "Advertising 100M\n"); -	} - -	/* Set 10 speed advertisement */ -	if (((phy->req_line_speed == SPEED_AUTO_NEG) && -			(phy->speed_cap_mask & -			(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | -			PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { -		an_10_100_val |= (1<<5); -		autoneg_val |= (1<<9 | 1<<12); -		if (phy->req_duplex == DUPLEX_FULL) +	/* Advertise 10/100 link speed */ +	if (phy->req_line_speed == SPEED_AUTO_NEG) { +		if (phy->speed_cap_mask & +		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { +			an_10_100_val |= (1<<5); +			autoneg_val |= (1<<9 | 1<<12); +			DP(NETIF_MSG_LINK, "Advertising 10M-HD\n"); +		} +		if (phy->speed_cap_mask & +		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {  			an_10_100_val |= (1<<6); -		DP(NETIF_MSG_LINK, "Advertising 10M\n"); +			autoneg_val |= (1<<9 | 1<<12); +			DP(NETIF_MSG_LINK, "Advertising 10M-FD\n"); +		} +		if (phy->speed_cap_mask & +		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { +			an_10_100_val |= (1<<7); +			autoneg_val |= (1<<9 | 1<<12); +			DP(NETIF_MSG_LINK, "Advertising 100M-HD\n"); +		} +		if (phy->speed_cap_mask & +		    PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { +			an_10_100_val |= 
(1<<8); +			autoneg_val |= (1<<9 | 1<<12); +			DP(NETIF_MSG_LINK, "Advertising 100M-FD\n"); +		}  	}  	/* Only 10/100 are allowed to work in FORCE mode */ @@ -12476,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,  	u32 dont_clear_stat, lfa_sts;  	struct bnx2x *bp = params->bp; +	bnx2x_set_mdio_emac_per_phy(bp, params);  	/* Sync the link parameters */  	bnx2x_link_status_update(params, vars); @@ -13329,6 +13326,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,  	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,  	   old_status, status); +	/* Do not touch the link in case physical link down */ +	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) +		return 1; +  	/* a. Update shmem->link_status accordingly  	 * b. Update link_vars->link_up  	 */ @@ -13378,9 +13379,9 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,  *	a fault, for example, due to break in the TX side of fiber.  *  ******************************************************************************/ -int bnx2x_check_half_open_conn(struct link_params *params, -				struct link_vars *vars, -				u8 notify) +static int bnx2x_check_half_open_conn(struct link_params *params, +				      struct link_vars *vars, +				      u8 notify)  {  	struct bnx2x *bp = params->bp;  	u32 lss_status = 0; @@ -13537,7 +13538,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,  	 */  	not_kr2_device = (((base_page & 0x8000) == 0) ||  			  (((base_page & 0x8000) && -			    ((next_page & 0xe0) == 0x2)))); +			    ((next_page & 0xe0) == 0x20))));  	/* In case KR2 is already disabled, check if we need to re-enable it */  	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 4df45234fdc..389f5f8cb0a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -533,19 +533,11 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);  int bnx2x_ets_e3b0_config(const struct link_params *params,  			 const struct link_vars *vars,  			 struct bnx2x_ets_params *ets_params); -/* Read pfc statistic*/ -void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, -						 u32 pfc_frames_sent[2], -						 u32 pfc_frames_received[2]); +  void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,  			    u32 chip_id, u32 shmem_base, u32 shmem2_base,  			    u8 port); -int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, -			       struct link_params *params); -  void bnx2x_period_func(struct link_params *params, struct link_vars *vars); -int bnx2x_check_half_open_conn(struct link_params *params, -			       struct link_vars *vars, u8 notify);  #endif /* BNX2X_LINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 82b658d8c04..6a8b1453a1b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   * UDP CSUM errata workaround by Arik Gendelman @@ -27,6 +27,7 @@  #include <linux/slab.h>  #include <linux/interrupt.h>  #include <linux/pci.h> +#include <linux/aer.h>  #include <linux/init.h>  #include <linux/netdevice.h>  #include <linux/etherdevice.h> @@ -93,33 +94,34 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);  MODULE_FIRMWARE(FW_FILE_NAME_E1H);  MODULE_FIRMWARE(FW_FILE_NAME_E2); -int num_queues; -module_param(num_queues, int, 0); +int bnx2x_num_queues; +module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);  MODULE_PARM_DESC(num_queues,  		 " Set number of queues (default is as a number of CPUs)");  static int disable_tpa; -module_param(disable_tpa, int, 0); +module_param(disable_tpa, int, S_IRUGO);  MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); -int int_mode; -module_param(int_mode, int, 0); +static int int_mode; +module_param(int_mode, int, S_IRUGO);  MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "  				"(1 INT#x; 2 MSI)");  static int dropless_fc; -module_param(dropless_fc, int, 0); +module_param(dropless_fc, int, S_IRUGO);  MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");  static int mrrs = -1; -module_param(mrrs, int, 0); +module_param(mrrs, int, S_IRUGO);  MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");  static int debug; -module_param(debug, int, 0); +module_param(debug, int, S_IRUGO);  MODULE_PARM_DESC(debug, " Default debug msglevel"); -struct workqueue_struct *bnx2x_wq; +static struct workqueue_struct *bnx2x_wq; +struct workqueue_struct *bnx2x_iov_wq;  struct bnx2x_mac_vals {  	u32 xmac_addr; @@ -278,6 +280,12 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);  #define BNX2X_PREV_WAIT_NEEDED 1  static DEFINE_SEMAPHORE(bnx2x_prev_sem);  static LIST_HEAD(bnx2x_prev_list); + +/* Forward declaration */ +static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp); +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); +  /****************************************************************************  * General service functions  ****************************************************************************/ @@ -503,9 +511,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,  }  /* issue a dmae command over the init-channel and wait for completion */ -int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae) +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, +			       u32 *comp)  { -	u32 *wb_comp = bnx2x_sp(bp, wb_comp);  	int cnt = CHIP_REV_IS_SLOW(bp) ? 
(400000) : 4000;  	int rc = 0; @@ -518,14 +526,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)  	spin_lock_bh(&bp->dmae_lock);  	/* reset completion */ -	*wb_comp = 0; +	*comp = 0;  	/* post the command on the channel used for initializations */  	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));  	/* wait for completion */  	udelay(5); -	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { +	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {  		if (!cnt ||  		    (bp->recovery_state != BNX2X_RECOVERY_DONE && @@ -537,7 +545,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)  		cnt--;  		udelay(50);  	} -	if (*wb_comp & DMAE_PCI_ERR_FLAG) { +	if (*comp & DMAE_PCI_ERR_FLAG) {  		BNX2X_ERR("DMAE PCI error!\n");  		rc = DMAE_PCI_ERROR;  	} @@ -574,10 +582,12 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,  	dmae.len = len32;  	/* issue the command and wait for completion */ -	rc = bnx2x_issue_dmae_with_comp(bp, &dmae); +	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));  	if (rc) {  		BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR  		bnx2x_panic(); +#endif  	}  } @@ -611,10 +621,12 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)  	dmae.len = len32;  	/* issue the command and wait for completion */ -	rc = bnx2x_issue_dmae_with_comp(bp, &dmae); +	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));  	if (rc) {  		BNX2X_ERR("DMAE returned failure %d\n", rc); +#ifdef BNX2X_STOP_ON_ERROR  		bnx2x_panic(); +#endif  	}  } @@ -751,6 +763,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)  	return rc;  } +#define MCPR_TRACE_BUFFER_SIZE	(0x800) +#define SCRATCH_BUFFER_SIZE(bp)	\ +	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000)) +  void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)  {  	u32 addr, val; @@ -775,7 +791,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)  		trace_shmem_base = bp->common.shmem_base;  	else  		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr); -	addr = trace_shmem_base - 0x800; + +	/* sanity */ +	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE || +	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) + +				SCRATCH_BUFFER_SIZE(bp)) { +		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n", +			  trace_shmem_base); +		return; +	} + +	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;  	/* validate TRCB signature */  	mark = REG_RD(bp, addr); @@ -787,14 +813,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)  	/* read cyclic buffer pointer */  	addr += 4;  	mark = REG_RD(bp, addr); -	mark = (CHIP_IS_E1x(bp) ? 
MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) -			+ ((mark + 0x3) & ~0x3) - 0x08000000; +	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000; +	if (mark >= trace_shmem_base || mark < addr + 4) { +		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n"); +		return; +	}  	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);  	printk("%s", lvl);  	/* dump buffer after the mark */ -	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { +	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {  		for (word = 0; word < 8; word++)  			data[word] = htonl(REG_RD(bp, offset + 4*word));  		data[8] = 0x0; @@ -890,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)  	u16 start = 0, end = 0;  	u8 cos;  #endif -	if (disable_int) +	if (IS_PF(bp) && disable_int)  		bnx2x_int_disable(bp);  	bp->stats_state = STATS_STATE_DISABLED; @@ -901,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)  	/* Indices */  	/* Common */ -	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", -		  bp->def_idx, bp->def_att_idx, bp->attn_state, -		  bp->spq_prod_idx, bp->stats_counter); -	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n", -		  bp->def_status_blk->atten_status_block.attn_bits, -		  bp->def_status_blk->atten_status_block.attn_bits_ack, -		  bp->def_status_blk->atten_status_block.status_block_id, -		  bp->def_status_blk->atten_status_block.attn_bits_index); -	BNX2X_ERR("     def ("); -	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) -		pr_cont("0x%x%s", -			bp->def_status_blk->sp_sb.index_values[i], -			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " "); - -	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++) -		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + -			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + -			i*sizeof(u32)); - -	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n", -	       sp_sb_data.igu_sb_id, -	       sp_sb_data.igu_seg_id, -	       sp_sb_data.p_func.pf_id, -	       sp_sb_data.p_func.vnic_id, -	       sp_sb_data.p_func.vf_id, -	       sp_sb_data.p_func.vf_valid, -	       sp_sb_data.state); +	if (IS_PF(bp)) { +		struct host_sp_status_block *def_sb = bp->def_status_blk; +		int data_size, cstorm_offset; + +		BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", +			  bp->def_idx, bp->def_att_idx, bp->attn_state, +			  bp->spq_prod_idx, bp->stats_counter); +		BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n", +			  def_sb->atten_status_block.attn_bits, +			  def_sb->atten_status_block.attn_bits_ack, +			  def_sb->atten_status_block.status_block_id, +			  def_sb->atten_status_block.attn_bits_index); +		BNX2X_ERR("     def ("); +		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) +			pr_cont("0x%x%s", +				def_sb->sp_sb.index_values[i], +				(i == HC_SP_SB_MAX_INDICES - 1) ? 
")  " : " "); + +		data_size = sizeof(struct hc_sp_status_block_data) / +			    sizeof(u32); +		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); +		for (i = 0; i < data_size; i++) +			*((u32 *)&sp_sb_data + i) = +				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + +					   i * sizeof(u32)); + +		pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n", +			sp_sb_data.igu_sb_id, +			sp_sb_data.igu_seg_id, +			sp_sb_data.p_func.pf_id, +			sp_sb_data.p_func.vnic_id, +			sp_sb_data.p_func.vf_id, +			sp_sb_data.p_func.vf_valid, +			sp_sb_data.state); +	}  	for_each_eth_queue(bp, i) {  		struct bnx2x_fastpath *fp = &bp->fp[i]; @@ -985,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)  			pr_cont("0x%x%s",  			       fp->sb_index_values[j],  			       (j == loop - 1) ? ")" : " "); + +		/* VF cannot access FW refelection for status block */ +		if (IS_VF(bp)) +			continue; +  		/* fw sb data */  		data_size = CHIP_IS_E1x(bp) ?  			sizeof(struct hc_status_block_data_e1x) : @@ -1036,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)  	}  #ifdef BNX2X_STOP_ON_ERROR - -	/* event queue */ -	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); -	for (i = 0; i < NUM_EQ_DESC; i++) { -		u32 *data = (u32 *)&bp->eq_ring[i].message.data; - -		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", -			  i, bp->eq_ring[i].message.opcode, -			  bp->eq_ring[i].message.error); -		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); +	if (IS_PF(bp)) { +		/* event queue */ +		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); +		for (i = 0; i < NUM_EQ_DESC; i++) { +			u32 *data = (u32 *)&bp->eq_ring[i].message.data; + +			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", +				  i, bp->eq_ring[i].message.opcode, +				  bp->eq_ring[i].message.error); +			BNX2X_ERR("data: %x %x %x\n", +				  data[0], data[1], data[2]); +		}  	}  	/* Rings */ @@ -1112,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)  		}  	}  #endif -	bnx2x_fw_dump(bp); -	bnx2x_mc_assert(bp); +	if (IS_PF(bp)) { +		bnx2x_fw_dump(bp); +		bnx2x_mc_assert(bp); +	}  	BNX2X_ERR("end crash dump -----------------\n");  } @@ -1786,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)  		drv_cmd = BNX2X_Q_CMD_EMPTY;  		break; +	case (RAMROD_CMD_ID_ETH_TPA_UPDATE): +		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid); +		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA; +		break; +  	default:  		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",  			  command, fp->index); @@ -1806,13 +1857,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)  #else  		return;  #endif -	/* SRIOV: reschedule any 'in_progress' operations */ -	bnx2x_iov_sp_event(bp, cid, true); -	smp_mb__before_atomic_inc(); +	smp_mb__before_atomic();  	atomic_inc(&bp->cq_spq_left);  	/* push the change in bp->spq_left and towards the memory */ -	smp_mb__after_atomic_inc(); +	smp_mb__after_atomic();  	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); @@ -1827,11 +1876,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)  		 * sp_state is cleared, and this order prevents  		 * races  		 */ -		smp_mb__before_clear_bit(); +		smp_mb__before_atomic();  		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);  		wmb();  		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); -		smp_mb__after_clear_bit(); +		smp_mb__after_atomic();  	
	/* schedule the sp task as mcp ack is required */  		bnx2x_schedule_sp_task(bp); @@ -2979,6 +3028,9 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,  	if (zero_stats)  		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); +	if (bp->flags & TX_SWITCHING) +		__set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags); +  	__set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);  	__set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags); @@ -3276,6 +3328,10 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)  	ether_stat->txq_size = bp->tx_ring_size;  	ether_stat->rxq_size = bp->rx_ring_size; + +#ifdef CONFIG_BNX2X_SRIOV +	ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0; +#endif  }  static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) @@ -3425,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)  	bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);  } +#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH	(20) +#define BNX2X_UPDATE_DRV_INFO_IND_COUNT		(25) +  static void bnx2x_handle_drv_info_req(struct bnx2x *bp)  {  	enum drv_info_opcode op_code;  	u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control); +	bool release = false; +	int wait;  	/* if drv_info version supported by MFW doesn't match - send NACK */  	if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { @@ -3439,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)  	op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>  		  DRV_INFO_CONTROL_OP_CODE_SHIFT; +	/* Must prevent other flows from accessing drv_info_to_mcp */ +	mutex_lock(&bp->drv_info_mutex); +  	memset(&bp->slowpath->drv_info_to_mcp, 0,  	       sizeof(union drv_info_to_mcp)); @@ -3455,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)  	default:  		/* if op code isn't supported - send NACK */  		bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0); -		return; +		goto out;  	}  	/* if we got drv_info attn from MFW then these fields are defined in @@ -3467,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)  		U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));  	bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0); + +	/* Since possible management wants both this and get_driver_version +	 * need to wait until management notifies us it finished utilizing +	 * the buffer. 
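
The wait loop added just below is a bounded poll on a per-PF handshake bit in shmem2. Here is a standalone model of it; the wait_for_mfw_release() helper, the plain variable standing in for SHMEM2_RD()/SHMEM2_WR(), and the simulated ack in main() are all invented, while the 25 x 20 ms budget mirrors the BNX2X_UPDATE_DRV_INFO_IND_* constants in the patch.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define IND_POLL_MS	20	/* BNX2X_UPDATE_DRV_INFO_IND_LENGTH */
#define IND_POLL_COUNT	25	/* BNX2X_UPDATE_DRV_INFO_IND_COUNT  */

static uint32_t mfw_drv_indication;	/* stand-in for the shmem2 field */

/* Wait (bounded) for management firmware to set the per-PF "read done"
 * bit; clear it and report success, otherwise the caller keeps ownership
 * of the drv_info buffer, as in the patch. */
static bool wait_for_mfw_release(unsigned int pf)
{
	uint32_t bit = 1u << pf;
	int i;

	for (i = 0; i < IND_POLL_COUNT; i++) {
		uint32_t ind = mfw_drv_indication;		/* SHMEM2_RD */

		if (ind & bit) {
			mfw_drv_indication = ind & ~bit;	/* SHMEM2_WR */
			return true;
		}
		usleep(IND_POLL_MS * 1000);			/* msleep()  */
	}
	return false;
}

int main(void)
{
	mfw_drv_indication = 1u << 2;	/* pretend the MFW already acked PF 2 */
	printf("pf2 released: %d\n", wait_for_mfw_release(2));
	printf("pf0 released: %d\n", wait_for_mfw_release(0));	/* times out */
	return 0;
}
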
+	 */ +	if (!SHMEM2_HAS(bp, mfw_drv_indication)) { +		DP(BNX2X_MSG_MCP, "Management does not support indication\n"); +	} else if (!bp->drv_info_mng_owner) { +		u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1)); + +		for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) { +			u32 indication = SHMEM2_RD(bp, mfw_drv_indication); + +			/* Management is done; need to clear indication */ +			if (indication & bit) { +				SHMEM2_WR(bp, mfw_drv_indication, +					  indication & ~bit); +				release = true; +				break; +			} + +			msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH); +		} +	} +	if (!release) { +		DP(BNX2X_MSG_MCP, "Management did not release indication\n"); +		bp->drv_info_mng_owner = true; +	} + +out: +	mutex_unlock(&bp->drv_info_mutex); +} + +static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format) +{ +	u8 vals[4]; +	int i = 0; + +	if (bnx2x_format) { +		i = sscanf(version, "1.%c%hhd.%hhd.%hhd", +			   &vals[0], &vals[1], &vals[2], &vals[3]); +		if (i > 0) +			vals[0] -= '0'; +	} else { +		i = sscanf(version, "%hhd.%hhd.%hhd.%hhd", +			   &vals[0], &vals[1], &vals[2], &vals[3]); +	} + +	while (i < 4) +		vals[i++] = 0; + +	return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3]; +} + +void bnx2x_update_mng_version(struct bnx2x *bp) +{ +	u32 iscsiver = DRV_VER_NOT_LOADED; +	u32 fcoever = DRV_VER_NOT_LOADED; +	u32 ethver = DRV_VER_NOT_LOADED; +	int idx = BP_FW_MB_IDX(bp); +	u8 *version; + +	if (!SHMEM2_HAS(bp, func_os_drv_ver)) +		return; + +	mutex_lock(&bp->drv_info_mutex); +	/* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */ +	if (bp->drv_info_mng_owner) +		goto out; + +	if (bp->state != BNX2X_STATE_OPEN) +		goto out; + +	/* Parse ethernet driver version */ +	ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); +	if (!CNIC_LOADED(bp)) +		goto out; + +	/* Try getting storage driver version via cnic */ +	memset(&bp->slowpath->drv_info_to_mcp, 0, +	       sizeof(union drv_info_to_mcp)); +	bnx2x_drv_info_iscsi_stat(bp); +	version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version; +	iscsiver = bnx2x_update_mng_version_utility(version, false); + +	memset(&bp->slowpath->drv_info_to_mcp, 0, +	       sizeof(union drv_info_to_mcp)); +	bnx2x_drv_info_fcoe_stat(bp); +	version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version; +	fcoever = bnx2x_update_mng_version_utility(version, false); + +out: +	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver); +	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver); +	SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever); + +	mutex_unlock(&bp->drv_info_mutex); + +	DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n", +	   ethver, iscsiver, fcoever);  }  static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) @@ -3609,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,  			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |  				    HW_CID(bp, cid)); -	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; - -	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & -		 SPE_HDR_FUNCTION_ID); +	/* In some cases, type may already contain the func-id +	 * mainly in SRIOV related use cases, so we add it here only +	 * if it's not already set. 
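
bnx2x_update_mng_version_utility() above folds a dotted version string into a single u32, one byte per field, zeroing anything it could not parse. Below is a standalone approximation: pack_version() is a made-up name, %hhu is used instead of the driver's u8/%hhd pairing, and the sample strings in main() are invented, but the packing is the same.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t pack_version(const char *version, bool bnx2x_format)
{
	uint8_t vals[4];
	int i = 0;

	if (bnx2x_format) {
		/* "1.78.19-0" style: the digit right after "1." is scanned
		 * as a character and converted back to its numeric value */
		i = sscanf(version, "1.%c%hhu.%hhu.%hhu",
			   (char *)&vals[0], &vals[1], &vals[2], &vals[3]);
		if (i > 0)
			vals[0] -= '0';
	} else {
		i = sscanf(version, "%hhu.%hhu.%hhu.%hhu",
			   &vals[0], &vals[1], &vals[2], &vals[3]);
	}

	if (i < 0)
		i = 0;		/* EOF from sscanf: nothing parsed */
	while (i < 4)
		vals[i++] = 0;	/* unparsed fields default to 0 */

	return ((uint32_t)vals[0] << 24) | ((uint32_t)vals[1] << 16) |
	       ((uint32_t)vals[2] << 8) | vals[3];
}

int main(void)
{
	printf("0x%08x\n", pack_version("1.78.19-0", true));	/* driver-style string */
	printf("0x%08x\n", pack_version("2.7.14.2", false));	/* storage-style string */
	return 0;
}
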
+	 */ +	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) { +		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & +			SPE_HDR_CONN_TYPE; +		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & +			 SPE_HDR_FUNCTION_ID); +	} else { +		type = cmd_type; +	}  	spe->hdr.type = cpu_to_le16(type); @@ -3843,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)  	 * This is due to some boards consuming sufficient power when driver is  	 * up to overheat if fan fails.  	 */ -	smp_mb__before_clear_bit(); -	set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state); -	smp_mb__after_clear_bit(); -	schedule_delayed_work(&bp->sp_rtnl_task, 0); +	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);  }  static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) @@ -3990,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)  				bnx2x_handle_drv_info_req(bp);  			if (val & DRV_STATUS_VF_DISABLED) -				bnx2x_vf_handle_flr_event(bp); +				bnx2x_schedule_iov_task(bp, +							BNX2X_IOV_HANDLE_FLR);  			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))  				bnx2x_pmf_update(bp); @@ -4280,65 +4450,60 @@ static void _print_next_block(int idx, const char *blk)  	pr_cont("%s%s", idx ? ", " : "", blk);  } -static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, -					    int par_num, bool print) +static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig, +					    int *par_num, bool print)  { -	int i = 0; -	u32 cur_bit = 0; +	u32 cur_bit; +	bool res; +	int i; + +	res = false; +  	for (i = 0; sig; i++) { -		cur_bit = ((u32)0x1 << i); +		cur_bit = (0x1UL << i);  		if (sig & cur_bit) { -			switch (cur_bit) { -			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "BRB"); +			res |= true; /* Each bit is real error! 
*/ + +			if (print) { +				switch (cur_bit) { +				case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: +					_print_next_block((*par_num)++, "BRB");  					_print_parity(bp,  						      BRB1_REG_BRB1_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "PARSER"); +					break; +				case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: +					_print_next_block((*par_num)++, +							  "PARSER");  					_print_parity(bp, PRS_REG_PRS_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "TSDM"); +					break; +				case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: +					_print_next_block((*par_num)++, "TSDM");  					_print_parity(bp,  						      TSDM_REG_TSDM_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, +					break; +				case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: +					_print_next_block((*par_num)++,  							  "SEARCHER");  					_print_parity(bp, SRC_REG_SRC_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "TCM"); -					_print_parity(bp, -						      TCM_REG_TCM_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "TSEMI"); +					break; +				case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: +					_print_next_block((*par_num)++, "TCM"); +					_print_parity(bp, TCM_REG_TCM_PRTY_STS); +					break; +				case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: +					_print_next_block((*par_num)++, +							  "TSEMI");  					_print_parity(bp,  						      TSEM_REG_TSEM_PRTY_STS_0);  					_print_parity(bp,  						      TSEM_REG_TSEM_PRTY_STS_1); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "XPB"); +					break; +				case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: +					_print_next_block((*par_num)++, "XPB");  					_print_parity(bp, GRCBASE_XPB +  							  PB_REG_PB_PRTY_STS); +					break;  				} -				break;  			}  			/* Clear the bit */ @@ -4346,53 +4511,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,  		}  	} -	return par_num; +	return res;  } -static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, -					    int par_num, bool *global, +static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig, +					    int *par_num, bool *global,  					    bool print)  { -	int i = 0; -	u32 cur_bit = 0; +	u32 cur_bit; +	bool res; +	int i; + +	res = false; +  	for (i = 0; sig; i++) { -		cur_bit = ((u32)0x1 << i); +		cur_bit = (0x1UL << i);  		if (sig & cur_bit) { +			res |= true; /* Each bit is real error! 
*/  			switch (cur_bit) {  			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "PBF"); +					_print_next_block((*par_num)++, "PBF");  					_print_parity(bp, PBF_REG_PBF_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "QM"); +					_print_next_block((*par_num)++, "QM");  					_print_parity(bp, QM_REG_QM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "TM"); +					_print_next_block((*par_num)++, "TM");  					_print_parity(bp, TM_REG_TM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "XSDM"); +					_print_next_block((*par_num)++, "XSDM");  					_print_parity(bp,  						      XSDM_REG_XSDM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "XCM"); +					_print_next_block((*par_num)++, "XCM");  					_print_parity(bp, XCM_REG_XCM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "XSEMI"); +					_print_next_block((*par_num)++, +							  "XSEMI");  					_print_parity(bp,  						      XSEM_REG_XSEM_PRTY_STS_0);  					_print_parity(bp, @@ -4401,7 +4572,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,  				break;  			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, +					_print_next_block((*par_num)++,  							  "DOORBELLQ");  					_print_parity(bp,  						      DORQ_REG_DORQ_PRTY_STS); @@ -4409,7 +4580,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,  				break;  			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "NIG"); +					_print_next_block((*par_num)++, "NIG");  					if (CHIP_IS_E1x(bp)) {  						_print_parity(bp,  							NIG_REG_NIG_PRTY_STS); @@ -4423,32 +4594,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,  				break;  			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:  				if (print) -					_print_next_block(par_num++, +					_print_next_block((*par_num)++,  							  "VAUX PCI CORE");  				*global = true;  				break;  			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "DEBUG"); +					_print_next_block((*par_num)++, +							  "DEBUG");  					_print_parity(bp, DBG_REG_DBG_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "USDM"); +					_print_next_block((*par_num)++, "USDM");  					_print_parity(bp,  						      USDM_REG_USDM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "UCM"); +					_print_next_block((*par_num)++, "UCM");  					_print_parity(bp, UCM_REG_UCM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "USEMI"); +					_print_next_block((*par_num)++, +							  "USEMI");  					_print_parity(bp,  						      USEM_REG_USEM_PRTY_STS_0);  					_print_parity(bp, @@ -4457,21 +4630,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,  				break;  			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:  				if (print) { -					
_print_next_block(par_num++, "UPB"); +					_print_next_block((*par_num)++, "UPB");  					_print_parity(bp, GRCBASE_UPB +  							  PB_REG_PB_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "CSDM"); +					_print_next_block((*par_num)++, "CSDM");  					_print_parity(bp,  						      CSDM_REG_CSDM_PRTY_STS);  				}  				break;  			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:  				if (print) { -					_print_next_block(par_num++, "CCM"); +					_print_next_block((*par_num)++, "CCM");  					_print_parity(bp, CCM_REG_CCM_PRTY_STS);  				}  				break; @@ -4482,80 +4655,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,  		}  	} -	return par_num; +	return res;  } -static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, -					    int par_num, bool print) +static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, +					    int *par_num, bool print)  { -	int i = 0; -	u32 cur_bit = 0; +	u32 cur_bit; +	bool res; +	int i; + +	res = false; +  	for (i = 0; sig; i++) { -		cur_bit = ((u32)0x1 << i); +		cur_bit = (0x1UL << i);  		if (sig & cur_bit) { -			switch (cur_bit) { -			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "CSEMI"); +			res |= true; /* Each bit is real error! */ +			if (print) { +				switch (cur_bit) { +				case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: +					_print_next_block((*par_num)++, +							  "CSEMI");  					_print_parity(bp,  						      CSEM_REG_CSEM_PRTY_STS_0);  					_print_parity(bp,  						      CSEM_REG_CSEM_PRTY_STS_1); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "PXP"); +					break; +				case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: +					_print_next_block((*par_num)++, "PXP");  					_print_parity(bp, PXP_REG_PXP_PRTY_STS);  					_print_parity(bp,  						      PXP2_REG_PXP2_PRTY_STS_0);  					_print_parity(bp,  						      PXP2_REG_PXP2_PRTY_STS_1); -				} -				break; -			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: -				if (print) -					_print_next_block(par_num++, -					"PXPPCICLOCKCLIENT"); -				break; -			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "CFC"); +					break; +				case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: +					_print_next_block((*par_num)++, +							  "PXPPCICLOCKCLIENT"); +					break; +				case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: +					_print_next_block((*par_num)++, "CFC");  					_print_parity(bp,  						      CFC_REG_CFC_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "CDU"); +					break; +				case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: +					_print_next_block((*par_num)++, "CDU");  					_print_parity(bp, CDU_REG_CDU_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "DMAE"); +					break; +				case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: +					_print_next_block((*par_num)++, "DMAE");  					_print_parity(bp,  						      DMAE_REG_DMAE_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "IGU"); +					break; +				case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: +					_print_next_block((*par_num)++, "IGU");  					if (CHIP_IS_E1x(bp))  						_print_parity(bp,  							HC_REG_HC_PRTY_STS);  					else  		
				_print_parity(bp,  							IGU_REG_IGU_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "MISC"); +					break; +				case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: +					_print_next_block((*par_num)++, "MISC");  					_print_parity(bp,  						      MISC_REG_MISC_PRTY_STS); +					break;  				} -				break;  			}  			/* Clear the bit */ @@ -4563,40 +4729,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,  		}  	} -	return par_num; +	return res;  } -static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, -					   bool *global, bool print) +static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, +					    int *par_num, bool *global, +					    bool print)  { -	int i = 0; -	u32 cur_bit = 0; +	bool res = false; +	u32 cur_bit; +	int i; +  	for (i = 0; sig; i++) { -		cur_bit = ((u32)0x1 << i); +		cur_bit = (0x1UL << i);  		if (sig & cur_bit) {  			switch (cur_bit) {  			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:  				if (print) -					_print_next_block(par_num++, "MCP ROM"); +					_print_next_block((*par_num)++, +							  "MCP ROM");  				*global = true; +				res |= true;  				break;  			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:  				if (print) -					_print_next_block(par_num++, +					_print_next_block((*par_num)++,  							  "MCP UMP RX");  				*global = true; +				res |= true;  				break;  			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:  				if (print) -					_print_next_block(par_num++, +					_print_next_block((*par_num)++,  							  "MCP UMP TX");  				*global = true; +				res |= true;  				break;  			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:  				if (print) -					_print_next_block(par_num++, +					_print_next_block((*par_num)++,  							  "MCP SCPAD"); -				*global = true; +				/* clear latched SCPAD PATIRY from MCP */ +				REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, +				       1UL << 10);  				break;  			} @@ -4605,45 +4780,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,  		}  	} -	return par_num; +	return res;  } -static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, -					    int par_num, bool print) +static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, +					    int *par_num, bool print)  { -	int i = 0; -	u32 cur_bit = 0; +	u32 cur_bit; +	bool res; +	int i; + +	res = false; +  	for (i = 0; sig; i++) { -		cur_bit = ((u32)0x1 << i); +		cur_bit = (0x1UL << i);  		if (sig & cur_bit) { -			switch (cur_bit) { -			case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "PGLUE_B"); +			res |= true; /* Each bit is real error! 
*/ +			if (print) { +				switch (cur_bit) { +				case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: +					_print_next_block((*par_num)++, +							  "PGLUE_B");  					_print_parity(bp, -						PGLUE_B_REG_PGLUE_B_PRTY_STS); -				} -				break; -			case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: -				if (print) { -					_print_next_block(par_num++, "ATC"); +						      PGLUE_B_REG_PGLUE_B_PRTY_STS); +					break; +				case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: +					_print_next_block((*par_num)++, "ATC");  					_print_parity(bp,  						      ATC_REG_ATC_PRTY_STS); +					break;  				} -				break;  			} -  			/* Clear the bit */  			sig &= ~cur_bit;  		}  	} -	return par_num; +	return res;  }  static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,  			      u32 *sig)  { +	bool res = false; +  	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||  	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||  	    (sig[2] & HW_PRTY_ASSERT_SET_2) || @@ -4660,23 +4840,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,  		if (print)  			netdev_err(bp->dev,  				   "Parity errors detected in blocks: "); -		par_num = bnx2x_check_blocks_with_parity0(bp, -			sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); -		par_num = bnx2x_check_blocks_with_parity1(bp, -			sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); -		par_num = bnx2x_check_blocks_with_parity2(bp, -			sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); -		par_num = bnx2x_check_blocks_with_parity3( -			sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); -		par_num = bnx2x_check_blocks_with_parity4(bp, -			sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); +		res |= bnx2x_check_blocks_with_parity0(bp, +			sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); +		res |= bnx2x_check_blocks_with_parity1(bp, +			sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print); +		res |= bnx2x_check_blocks_with_parity2(bp, +			sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print); +		res |= bnx2x_check_blocks_with_parity3(bp, +			sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print); +		res |= bnx2x_check_blocks_with_parity4(bp, +			sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);  		if (print)  			pr_cont("\n"); +	} -		return true; -	} else -		return false; +	return res;  }  /** @@ -5093,9 +5272,9 @@ static void bnx2x_after_function_update(struct bnx2x *bp)  		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);  		/* mark latest Q bit */ -		smp_mb__before_clear_bit(); +		smp_mb__before_atomic();  		set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); -		smp_mb__after_clear_bit(); +		smp_mb__after_atomic();  		/* send Q update ramrod for FCoE Q */  		rc = bnx2x_queue_state_change(bp, &queue_params); @@ -5172,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)  		/* handle eq element */  		switch (opcode) {  		case EVENT_RING_OPCODE_VF_PF_CHANNEL: -			DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n"); -			bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event); +			bnx2x_vf_mbx_schedule(bp, +					      &elem->message.data.vf_pf_event);  			continue;  		case EVENT_RING_OPCODE_STAT_QUERY: -			DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, -			   "got statistics comp event %d\n", -			   bp->stats_comp++); +			DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS), +			       "got statistics comp event %d\n", +			       bp->stats_comp++);  			/* nothing to do with stats comp */  			goto next_spqe; @@ -5205,18 +5384,18 @@ static void bnx2x_eq_int(struct bnx2x *bp)  		case EVENT_RING_OPCODE_STOP_TRAFFIC:  			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n"); +			
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);  			if (f_obj->complete_cmd(bp, f_obj,  						BNX2X_F_CMD_TX_STOP))  				break; -			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);  			goto next_spqe;  		case EVENT_RING_OPCODE_START_TRAFFIC:  			DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n"); +			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);  			if (f_obj->complete_cmd(bp, f_obj,  						BNX2X_F_CMD_TX_START))  				break; -			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);  			goto next_spqe;  		case EVENT_RING_OPCODE_FUNCTION_UPDATE: @@ -5229,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)  					break;  			} else { +				int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE; +  				DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,  				   "AFEX: ramrod completed FUNCTION_UPDATE\n");  				f_obj->complete_cmd(bp, f_obj, @@ -5238,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)  				 * sp_rtnl task as all Queue SP operations  				 * should run under rtnl_lock.  				 */ -				smp_mb__before_clear_bit(); -				set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, -					&bp->sp_rtnl_state); -				smp_mb__after_clear_bit(); - -				schedule_delayed_work(&bp->sp_rtnl_task, 0); +				bnx2x_schedule_sp_rtnl(bp, cmd, 0);  			}  			goto next_spqe; @@ -5324,7 +5500,7 @@ next_spqe:  		spqe_cnt++;  	} /* for */ -	smp_mb__before_atomic_inc(); +	smp_mb__before_atomic();  	atomic_add(spqe_cnt, &bp->eq_spq_left);  	bp->eq_cons = sw_cons; @@ -5391,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)  			     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);  	} -	/* must be called after the EQ processing (since eq leads to sriov -	 * ramrod completion flows). -	 * This flow may have been scheduled by the arrival of a ramrod -	 * completion, or by the sriov code rescheduling itself. 
-	 */ -	bnx2x_iov_sp_task(bp); -  	/* afex - poll to check if VIFSET_ACK should be sent to MFW */  	if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,  			       &bp->sp_state)) { @@ -5822,11 +5991,11 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)  }  /* called with netif_addr_lock_bh() */ -int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, -			unsigned long rx_mode_flags, -			unsigned long rx_accept_flags, -			unsigned long tx_accept_flags, -			unsigned long ramrod_flags) +static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, +			       unsigned long rx_mode_flags, +			       unsigned long rx_accept_flags, +			       unsigned long tx_accept_flags, +			       unsigned long ramrod_flags)  {  	struct bnx2x_rx_mode_ramrod_params ramrod_param;  	int rc; @@ -5934,7 +6103,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,  }  /* called with netif_addr_lock_bh() */ -int bnx2x_set_storm_rx_mode(struct bnx2x *bp) +static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)  {  	unsigned long rx_mode_flags = 0, ramrod_flags = 0;  	unsigned long rx_accept_flags = 0, tx_accept_flags = 0; @@ -5961,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)  {  	int i; -	if (IS_MF_SI(bp)) -		/* -		 * In switch independent mode, the TSTORM needs to accept -		 * packets that failed classification, since approximate match -		 * mac addresses aren't written to NIG LLH -		 */ -		REG_WR8(bp, BAR_TSTRORM_INTMEM + -			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); -	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ -		REG_WR8(bp, BAR_TSTRORM_INTMEM + -			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); -  	/* Zero this manually as its initialization is  	   currently missing in the initTool */  	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) @@ -6130,6 +6287,47 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)  			bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);  } +static void bnx2x_init_fcoe_fp(struct bnx2x *bp) +{ +	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); +	unsigned long q_type = 0; + +	bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); +	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, +						     BNX2X_FCOE_ETH_CL_ID_IDX); +	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); +	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; +	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; +	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; +	bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), +			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, +			  fp); + +	DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); + +	/* qZone id equals to FW (per path) client id */ +	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp); +	/* init shortcut */ +	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = +		bnx2x_rx_ustorm_prods_offset(fp); + +	/* Configure Queue State object */ +	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); +	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + +	/* No multi-CoS for FCoE L2 client */ +	BUG_ON(fp->max_cos != 1); + +	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, +			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), +			     bnx2x_sp_mapping(bp, q_rdata), q_type); + +	DP(NETIF_MSG_IFUP, +	   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n", +	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, +	   fp->igu_sb_id); +} +  void bnx2x_nic_init_cnic(struct bnx2x *bp)  {  	if (!NO_FCOE(bp)) @@ -7126,7 +7324,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)  	int port = BP_PORT(bp);  	int init_phase = port ? 
PHASE_PORT1 : PHASE_PORT0;  	u32 low, high; -	u32 val; +	u32 val, reg;  	DP(NETIF_MSG_HW, "starting port init  port %d\n", port); @@ -7271,6 +7469,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)  	val |= CHIP_IS_E1(bp) ? 0 : 0x10;  	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); +	/* SCPAD_PARITY should NOT trigger close the gates */ +	reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0; +	REG_WR(bp, reg, +	       REG_RD(bp, reg) & +	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); + +	reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0; +	REG_WR(bp, reg, +	       REG_RD(bp, reg) & +	       ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY); +  	bnx2x_init_block(bp, BLOCK_NIG, init_phase);  	if (!CHIP_IS_E1x(bp)) { @@ -7893,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp)  int bnx2x_alloc_mem_cnic(struct bnx2x *bp)  { -	if (!CHIP_IS_E1x(bp)) +	if (!CHIP_IS_E1x(bp)) {  		/* size = the status block + ramrod buffers */ -		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, -				sizeof(struct host_hc_status_block_e2)); -	else -		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, -				&bp->cnic_sb_mapping, -				sizeof(struct -				       host_hc_status_block_e1x)); +		bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, +						    sizeof(struct host_hc_status_block_e2)); +		if (!bp->cnic_sb.e2_sb) +			goto alloc_mem_err; +	} else { +		bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping, +						     sizeof(struct host_hc_status_block_e1x)); +		if (!bp->cnic_sb.e1x_sb) +			goto alloc_mem_err; +	} -	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) +	if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {  		/* allocate searcher T2 table, as it wasn't allocated before */ -		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); +		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); +		if (!bp->t2) +			goto alloc_mem_err; +	}  	/* write address to which L5 should insert its values */  	bp->cnic_eth_dev.addr_drv_info_to_mcp = @@ -7926,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)  {  	int i, allocated, context_size; -	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) +	if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {  		/* allocate searcher T2 table */ -		BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); +		bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ); +		if (!bp->t2) +			goto alloc_mem_err; +	} -	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, -			sizeof(struct host_sp_status_block)); +	bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping, +					     sizeof(struct host_sp_status_block)); +	if (!bp->def_status_blk) +		goto alloc_mem_err; -	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, -			sizeof(struct bnx2x_slowpath)); +	bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping, +				       sizeof(struct bnx2x_slowpath)); +	if (!bp->slowpath) +		goto alloc_mem_err;  	/* Allocate memory for CDU context:  	 * This memory is allocated separately and not in the generic ILT @@ -7954,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)  	for (i = 0, allocated = 0; allocated < context_size; i++) {  		bp->context[i].size = min(CDU_ILT_PAGE_SZ,  					  (context_size - allocated)); -		BNX2X_PCI_ALLOC(bp->context[i].vcxt, -				&bp->context[i].cxt_mapping, -				bp->context[i].size); +		bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping, +						      bp->context[i].size); +		if (!bp->context[i].vcxt) +			goto alloc_mem_err;  		allocated += bp->context[i].size;  	} -	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * 
ILT_MAX_LINES); +	bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line), +				 GFP_KERNEL); +	if (!bp->ilt->lines) +		goto alloc_mem_err;  	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))  		goto alloc_mem_err; @@ -7968,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)  		goto alloc_mem_err;  	/* Slow path ring */ -	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); +	bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE); +	if (!bp->spq) +		goto alloc_mem_err;  	/* EQ */ -	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, -			BCM_PAGE_SIZE * NUM_EQ_PAGES); +	bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping, +				      BCM_PAGE_SIZE * NUM_EQ_PAGES); +	if (!bp->eq_ring) +		goto alloc_mem_err;  	return 0; @@ -8691,16 +8921,16 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)  		int path = BP_PATH(bp);  		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n", -		   path, load_count[path][0], load_count[path][1], -		   load_count[path][2]); -		load_count[path][0]--; -		load_count[path][1 + port]--; +		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], +		   bnx2x_load_count[path][2]); +		bnx2x_load_count[path][0]--; +		bnx2x_load_count[path][1 + port]--;  		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n", -		   path, load_count[path][0], load_count[path][1], -		   load_count[path][2]); -		if (load_count[path][0] == 0) +		   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1], +		   bnx2x_load_count[path][2]); +		if (bnx2x_load_count[path][0] == 0)  			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; -		else if (load_count[path][1 + port] == 0) +		else if (bnx2x_load_count[path][1 + port] == 0)  			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;  		else  			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; @@ -8753,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)  		synchronize_irq(bp->pdev->irq);  	flush_workqueue(bnx2x_wq); +	flush_workqueue(bnx2x_iov_wq);  	while (bnx2x_func_get_state(bp, &bp->func_obj) !=  				BNX2X_F_STATE_STARTED && tout--) @@ -9315,6 +9546,10 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)  	bnx2x_process_kill_chip_reset(bp, global);  	barrier(); +	/* clear errors in PGB */ +	if (!CHIP_IS_E1x(bp)) +		REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); +  	/* Recover after reset: */  	/* MCP */  	if (global && bnx2x_reset_mcp_comp(bp, val)) @@ -9669,11 +9904,14 @@ sp_rtnl_not_reset:  			       &bp->sp_rtnl_state))  		bnx2x_pf_set_vfs_vlan(bp); -	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) +	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {  		bnx2x_dcbx_stop_hw_tx(bp); - -	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))  		bnx2x_dcbx_resume_hw_tx(bp); +	} + +	if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION, +			       &bp->sp_rtnl_state)) +		bnx2x_update_mng_version(bp);  	/* work which needs rtnl lock not-taken (as it takes the lock itself and  	 * can be called from other contexts as well) @@ -9723,7 +9961,7 @@ period_task_exit:   * Init service functions   */ -u32 bnx2x_get_pretend_reg(struct bnx2x *bp) +static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)  {  	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;  	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base; @@ -9810,6 +10048,82 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,  #define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)  #define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq)) +#define BCM_5710_UNDI_FW_MF_MAJOR	(0x07) +#define 
BCM_5710_UNDI_FW_MF_MINOR	(0x08) +#define BCM_5710_UNDI_FW_MF_VERS	(0x05) +#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) +#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) + +static bool bnx2x_prev_is_after_undi(struct bnx2x *bp) +{ +	/* UNDI marks its presence in DORQ - +	 * it initializes CID offset for normal bell to 0x7 +	 */ +	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & +	    MISC_REGISTERS_RESET_REG_1_RST_DORQ)) +		return false; + +	if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) { +		BNX2X_DEV_INFO("UNDI previously loaded\n"); +		return true; +	} + +	return false; +} + +static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) +{ +	u8 major, minor, version; +	u32 fw; + +	/* Must check that FW is loaded */ +	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) & +	     MISC_REGISTERS_RESET_REG_1_RST_XSEM)) { +		BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n"); +		return false; +	} + +	/* Read Currently loaded FW version */ +	fw = REG_RD(bp, XSEM_REG_PRAM); +	major = fw & 0xff; +	minor = (fw >> 0x8) & 0xff; +	version = (fw >> 0x10) & 0xff; +	BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n", +		       fw, major, minor, version); + +	if (major > BCM_5710_UNDI_FW_MF_MAJOR) +		return true; + +	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && +	    (minor > BCM_5710_UNDI_FW_MF_MINOR)) +		return true; + +	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) && +	    (minor == BCM_5710_UNDI_FW_MF_MINOR) && +	    (version >= BCM_5710_UNDI_FW_MF_VERS)) +		return true; + +	return false; +} + +static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp) +{ +	int i; + +	/* Due to legacy (FW) code, the first function on each engine has a +	 * different offset macro from the rest of the functions. +	 * Setting this for all 8 functions is harmless regardless of whether +	 * this is actually a multi-function device. 
+	 */ +	for (i = 0; i < 2; i++) +		REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1); + +	for (i = 2; i < 8; i++) +		REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1); + +	BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n"); +} +  static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)  {  	u16 rcq, bd; @@ -9879,7 +10193,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)  static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)  {  	struct bnx2x_prev_path_list *tmp_list; -	int rc = false; +	bool rc = false;  	if (down_trylock(&bnx2x_prev_sem))  		return false; @@ -10006,11 +10320,15 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)  	BNX2X_DEV_INFO("Path is unmarked\n"); +	/* Cannot proceed with FLR if UNDI is loaded, since FW does not match */ +	if (bnx2x_prev_is_after_undi(bp)) +		goto out; +  	/* If function has FLR capabilities, and existing FW version matches  	 * the one required, then FLR will be sufficient to clean any residue  	 * left by previous driver  	 */ -	rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION); +	rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);  	if (!rc) {  		/* fw version is good */ @@ -10026,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)  	BNX2X_DEV_INFO("Could not FLR\n"); +out:  	/* Close the MCP request, return failure*/  	rc = bnx2x_prev_mcp_done(bp);  	if (!rc) @@ -10056,6 +10375,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)  	/* Reset should be performed after BRB is emptied */  	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {  		u32 timer_count = 1000; +		bool need_write = true;  		/* Close the MAC Rx to prevent BRB from filling up */  		bnx2x_prev_unload_close_mac(bp, &mac_vals); @@ -10063,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)  		/* close LLH filters towards the BRB */  		bnx2x_set_rx_filter(&bp->link_params, 0); -		/* Check if the UNDI driver was previously loaded -		 * UNDI driver initializes CID offset for normal bell to 0x7 -		 */ -		if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { -			tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST); -			if (tmp_reg == 0x7) { -				BNX2X_DEV_INFO("UNDI previously loaded\n"); -				prev_undi = true; -				/* clear the UNDI indication */ -				REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); -				/* clear possible idle check errors */ -				REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); -			} +		/* Check if the UNDI driver was previously loaded */ +		if (bnx2x_prev_is_after_undi(bp)) { +			prev_undi = true; +			/* clear the UNDI indication */ +			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); +			/* clear possible idle check errors */ +			REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);  		}  		if (!CHIP_IS_E1x(bp))  			/* block FW from writing to host */ @@ -10098,10 +10412,20 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)  			else  				timer_count--; -			/* If UNDI resides in memory, manually increment it */ -			if (prev_undi) +			/* New UNDI FW supports MF and contains better +			 * cleaning methods - might be redundant but harmless. 
+			 */ +			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { +				if (need_write) { +					bnx2x_prev_unload_undi_mf(bp); +					need_write = false; +				} +			} else if (prev_undi) { +				/* If UNDI resides in memory, +				 * manually increment it +				 */  				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1); - +			}  			udelay(10);  		} @@ -10221,8 +10545,8 @@ static int bnx2x_prev_unload(struct bnx2x *bp)  	} while (--time_counter);  	if (!time_counter || rc) { -		BNX2X_ERR("Failed unloading previous driver, aborting\n"); -		rc = -EBUSY; +		BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n"); +		rc = -EPROBE_DEFER;  	}  	/* Mark function if its port was used to boot from SAN */ @@ -11149,6 +11473,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)  			bnx2x_get_cnic_mac_hwinfo(bp);  	} +	if (!BP_NOMCP(bp)) { +		/* Read physical port identifier from shmem */ +		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); +		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); +		bnx2x_set_mac_buf(bp->phys_port_id, val, val2); +		bp->flags |= HAS_PHYS_PORT_ID; +	} +  	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);  	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) @@ -11395,9 +11727,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)  		}  	} -	/* adjust igu_sb_cnt to MF for E1x */ -	if (CHIP_IS_E1x(bp) && IS_MF(bp)) -		bp->igu_sb_cnt /= E1HVN_MAX; +	/* adjust igu_sb_cnt to MF for E1H */ +	if (CHIP_IS_E1H(bp) && IS_MF(bp)) +		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);  	/* port info */  	bnx2x_get_port_hwinfo(bp); @@ -11552,12 +11884,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)  	mutex_init(&bp->port.phy_mutex);  	mutex_init(&bp->fw_mb_mutex); +	mutex_init(&bp->drv_info_mutex); +	bp->drv_info_mng_owner = false;  	spin_lock_init(&bp->stats_lock);  	sema_init(&bp->stats_sema, 1);  	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);  	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);  	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task); +	INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);  	if (IS_PF(bp)) {  		rc = bnx2x_get_hwinfo(bp);  		if (rc) @@ -11584,7 +11919,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)  							DRV_MSG_SEQ_NUMBER_MASK;  		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); -		bnx2x_prev_unload(bp); +		rc = bnx2x_prev_unload(bp); +		if (rc) { +			bnx2x_free_mem_bp(bp); +			return rc; +		}  	}  	if (CHIP_REV_IS_FPGA(bp)) @@ -11595,6 +11934,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)  	bp->disable_tpa = disable_tpa;  	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); +	/* Reduce memory usage in kdump environment by disabling TPA */ +	bp->disable_tpa |= reset_devices;  	/* Set TPA flags */  	if (bp->disable_tpa) { @@ -11685,9 +12026,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)  static int bnx2x_open(struct net_device *dev)  {  	struct bnx2x *bp = netdev_priv(dev); -	bool global = false; -	int other_engine = BP_PATH(bp) ? 0 : 1; -	bool other_load_status, load_status;  	int rc;  	bp->stats_init = true; @@ -11703,6 +12041,10 @@ static int bnx2x_open(struct net_device *dev)  	 * Parity recovery is only relevant for PF driver.  	 */  	if (IS_PF(bp)) { +		int other_engine = BP_PATH(bp) ? 
0 : 1; +		bool other_load_status, load_status; +		bool global = false; +  		other_load_status = bnx2x_get_load_status(bp, other_engine);  		load_status = bnx2x_get_load_status(bp, BP_PATH(bp));  		if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) || @@ -11746,7 +12088,7 @@ static int bnx2x_open(struct net_device *dev)  	rc = bnx2x_nic_load(bp, LOAD_OPEN);  	if (rc)  		return rc; -	return bnx2x_open_epilog(bp); +	return 0;  }  /* called with rtnl_lock */ @@ -11765,7 +12107,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,  {  	int mc_count = netdev_mc_count(bp->dev);  	struct bnx2x_mcast_list_elem *mc_mac = -		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); +		kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);  	struct netdev_hw_addr *ha;  	if (!mc_mac) @@ -11878,7 +12220,7 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)  }  /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */ -void bnx2x_set_rx_mode(struct net_device *dev) +static void bnx2x_set_rx_mode(struct net_device *dev)  {  	struct bnx2x *bp = netdev_priv(dev); @@ -11887,11 +12229,8 @@ void bnx2x_set_rx_mode(struct net_device *dev)  		return;  	} else {  		/* Schedule an SP task to handle rest of change */ -		DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); -		smp_mb__before_clear_bit(); -		set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state); -		smp_mb__after_clear_bit(); -		schedule_delayed_work(&bp->sp_rtnl_task, 0); +		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE, +				       NETIF_MSG_IFUP);  	}  } @@ -11924,11 +12263,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)  			/* configuring mcast to a vf involves sleeping (when we  			 * wait for the pf's response).  			 */ -			smp_mb__before_clear_bit(); -			set_bit(BNX2X_SP_RTNL_VFPF_MCAST, -				&bp->sp_rtnl_state); -			smp_mb__after_clear_bit(); -			schedule_delayed_work(&bp->sp_rtnl_task, 0); +			bnx2x_schedule_sp_rtnl(bp, +					       BNX2X_SP_RTNL_VFPF_MCAST, 0);  		}  	} @@ -12044,6 +12380,20 @@ static int bnx2x_validate_addr(struct net_device *dev)  	return 0;  } +static int bnx2x_get_phys_port_id(struct net_device *netdev, +				  struct netdev_phys_port_id *ppid) +{ +	struct bnx2x *bp = netdev_priv(netdev); + +	if (!(bp->flags & HAS_PHYS_PORT_ID)) +		return -EOPNOTSUPP; + +	ppid->id_len = sizeof(bp->phys_port_id); +	memcpy(ppid->id, bp->phys_port_id, ppid->id_len); + +	return 0; +} +  static const struct net_device_ops bnx2x_netdev_ops = {  	.ndo_open		= bnx2x_open,  	.ndo_stop		= bnx2x_close, @@ -12073,19 +12423,15 @@ static const struct net_device_ops bnx2x_netdev_ops = {  #ifdef CONFIG_NET_RX_BUSY_POLL  	.ndo_busy_poll		= bnx2x_low_latency_recv,  #endif +	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,  };  static int bnx2x_set_coherency_mask(struct bnx2x *bp)  {  	struct device *dev = &bp->pdev->dev; -	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { -		bp->flags |= USING_DAC_FLAG; -		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { -			dev_err(dev, "dma_set_coherent_mask failed, aborting\n"); -			return -EIO; -		} -	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { +	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 && +	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {  		dev_err(dev, "System does not support DMA, aborting\n");  		return -EIO;  	} @@ -12093,6 +12439,14 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)  	return 0;  } +static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp) +{ +	if (bp->flags & AER_ENABLED) { +		pci_disable_pcie_error_reporting(bp->pdev); +		bp->flags &= 
~AER_ENABLED; +	} +} +  static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,  			  struct net_device *dev, unsigned long board_type)  { @@ -12199,6 +12553,14 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,  	/* clean indirect addresses */  	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,  			       PCICFG_VENDOR_ID_OFFSET); + +	/* AER (Advanced Error reporting) configuration */ +	rc = pci_enable_pcie_error_reporting(pdev); +	if (!rc) +		bp->flags |= AER_ENABLED; +	else +		BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc); +  	/*  	 * Clean the following indirect addresses for all functions since it  	 * is not used by the driver. @@ -12237,10 +12599,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,  		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |  		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;  	if (!CHIP_IS_E1x(bp)) { -		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL; +		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | +				    NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;  		dev->hw_enc_features =  			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |  			NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | +			NETIF_F_GSO_IPIP | +			NETIF_F_GSO_SIT |  			NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;  	} @@ -12248,8 +12613,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,  		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;  	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX; -	if (bp->flags & USING_DAC_FLAG) -		dev->features |= NETIF_F_HIGHDMA; +	dev->features |= NETIF_F_HIGHDMA;  	/* Add Loopback capability to the device */  	dev->hw_features |= NETIF_F_LOOPBACK; @@ -12274,34 +12638,11 @@ err_out_release:  err_out_disable:  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  err_out:  	return rc;  } -static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, -				       enum bnx2x_pci_bus_speed *speed) -{ -	u32 link_speed, val = 0; - -	pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val); -	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; - -	link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; - -	switch (link_speed) { -	case 3: -		*speed = BNX2X_PCI_LINK_SPEED_8000; -		break; -	case 2: -		*speed = BNX2X_PCI_LINK_SPEED_5000; -		break; -	default: -		*speed = BNX2X_PCI_LINK_SPEED_2500; -	} -} -  static int bnx2x_check_firmware(struct bnx2x *bp)  {  	const struct firmware *firmware = bp->firmware; @@ -12596,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)  	 * without the default SB.  	 * For VFs there is no default SB, then we return (index+1).  	 
*/ -	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); +	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);  	index = control & PCI_MSIX_FLAGS_QSIZE; @@ -12612,24 +12953,24 @@ static int set_max_cos_est(int chip_id)  		return BNX2X_MULTI_TX_COS_E1X;  	case BCM57712:  	case BCM57712_MF: -	case BCM57712_VF:  		return BNX2X_MULTI_TX_COS_E2_E3A0;  	case BCM57800:  	case BCM57800_MF: -	case BCM57800_VF:  	case BCM57810:  	case BCM57810_MF:  	case BCM57840_4_10:  	case BCM57840_2_20:  	case BCM57840_O:  	case BCM57840_MFO: -	case BCM57810_VF:  	case BCM57840_MF: -	case BCM57840_VF:  	case BCM57811:  	case BCM57811_MF: -	case BCM57811_VF:  		return BNX2X_MULTI_TX_COS_E3B0; +	case BCM57712_VF: +	case BCM57800_VF: +	case BCM57810_VF: +	case BCM57840_VF: +	case BCM57811_VF:  		return 1;  	default:  		pr_err("Unknown board_type (%d), aborting\n", chip_id); @@ -12651,15 +12992,13 @@ static int set_is_vf(int chip_id)  	}  } -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev); -  static int bnx2x_init_one(struct pci_dev *pdev,  				    const struct pci_device_id *ent)  {  	struct net_device *dev = NULL;  	struct bnx2x *bp; -	int pcie_width; -	enum bnx2x_pci_bus_speed pcie_speed; +	enum pcie_link_width pcie_width; +	enum pci_bus_speed pcie_speed;  	int rc, max_non_def_sbs;  	int rx_count, tx_count, rss_count, doorbell_size;  	int max_cos_est; @@ -12808,24 +13147,27 @@ static int bnx2x_init_one(struct pci_dev *pdev,  		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);  		rtnl_unlock();  	} - -	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); -	BNX2X_DEV_INFO("got pcie width %d and speed %d\n", -		       pcie_width, pcie_speed); - -	BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", +	if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) || +	    pcie_speed == PCI_SPEED_UNKNOWN || +	    pcie_width == PCIE_LNK_WIDTH_UNKNOWN) +		BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n"); +	else +		BNX2X_DEV_INFO( +		       "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",  		       board_info[ent->driver_data].name,  		       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),  		       pcie_width, -		       pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" : -		       pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" : -		       pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" : +		       pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" : +		       pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" : +		       pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :  		       "Unknown",  		       dev->base_addr, bp->pdev->irq, dev->dev_addr);  	return 0;  init_one_exit: +	bnx2x_disable_pcie_error_reporting(bp); +  	if (bp->regview)  		iounmap(bp->regview); @@ -12838,7 +13180,6 @@ init_one_exit:  		pci_release_regions(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  	return rc;  } @@ -12900,28 +13241,31 @@ static void __bnx2x_remove(struct pci_dev *pdev,  		pci_set_power_state(pdev, PCI_D3hot);  	} -	if (bp->regview) -		iounmap(bp->regview); +	bnx2x_disable_pcie_error_reporting(bp); +	if (remove_netdev) { +		if (bp->regview) +			iounmap(bp->regview); -	/* for vf doorbells are part of the regview and were unmapped along with -	 * it. FW is only loaded by PF. -	 */ -	if (IS_PF(bp)) { -		if (bp->doorbells) -			iounmap(bp->doorbells); +		/* For vfs, doorbells are part of the regview and were unmapped +		 * along with it. FW is only loaded by PF. 
+		 */ +		if (IS_PF(bp)) { +			if (bp->doorbells) +				iounmap(bp->doorbells); -		bnx2x_release_firmware(bp); -	} -	bnx2x_free_mem_bp(bp); +			bnx2x_release_firmware(bp); +		} else { +			bnx2x_vf_pci_dealloc(bp); +		} +		bnx2x_free_mem_bp(bp); -	if (remove_netdev)  		free_netdev(dev); -	if (atomic_read(&pdev->enable_cnt) == 1) -		pci_release_regions(pdev); +		if (atomic_read(&pdev->enable_cnt) == 1) +			pci_release_regions(pdev); -	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL); +		pci_disable_device(pdev); +	}  }  static void bnx2x_remove_one(struct pci_dev *pdev) @@ -12956,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)  	netdev_reset_tc(bp->dev);  	del_timer_sync(&bp->timer); -	cancel_delayed_work(&bp->sp_task); -	cancel_delayed_work(&bp->period_task); +	cancel_delayed_work_sync(&bp->sp_task); +	cancel_delayed_work_sync(&bp->period_task);  	spin_lock_bh(&bp->stats_lock);  	bp->stats_state = STATS_STATE_DISABLED; @@ -13078,6 +13422,14 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)  	rtnl_unlock(); +	/* If AER, perform cleanup of the PCIe registers */ +	if (bp->flags & AER_ENABLED) { +		if (pci_cleanup_aer_uncorrect_error_status(pdev)) +			BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n"); +		else +			DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n"); +	} +  	return PCI_ERS_RESULT_RECOVERED;  } @@ -13165,11 +13517,18 @@ static int __init bnx2x_init(void)  		pr_err("Cannot create workqueue\n");  		return -ENOMEM;  	} +	bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov"); +	if (!bnx2x_iov_wq) { +		pr_err("Cannot create iov workqueue\n"); +		destroy_workqueue(bnx2x_wq); +		return -ENOMEM; +	}  	ret = pci_register_driver(&bnx2x_pci_driver);  	if (ret) {  		pr_err("Cannot register driver\n");  		destroy_workqueue(bnx2x_wq); +		destroy_workqueue(bnx2x_iov_wq);  	}  	return ret;  } @@ -13181,6 +13540,7 @@ static void __exit bnx2x_cleanup(void)  	pci_unregister_driver(&bnx2x_pci_driver);  	destroy_workqueue(bnx2x_wq); +	destroy_workqueue(bnx2x_iov_wq);  	/* Free globally allocated resources */  	list_for_each_safe(pos, q, &bnx2x_prev_list) { @@ -13532,9 +13892,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)  	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {  		int count = ctl->data.credit.credit_count; -		smp_mb__before_atomic_inc(); +		smp_mb__before_atomic();  		atomic_add(count, &bp->cq_spq_left); -		smp_mb__after_atomic_inc(); +		smp_mb__after_atomic();  		break;  	}  	case DRV_CTL_ULP_REGISTER_CMD: { @@ -13574,6 +13934,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)  				REG_WR(bp, scratch_offset + i,  				       *(host_addr + i/4));  		} +		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);  		break;  	} @@ -13591,6 +13952,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)  				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;  			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);  		} +		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);  		break;  	} @@ -13696,6 +14058,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,  	rcu_assign_pointer(bp->cnic_ops, ops); +	/* Schedule driver to read CNIC driver versions */ +	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0); +  	return 0;  } @@ -13716,7 +14081,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)  	return 0;  } -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) +static struct cnic_eth_dev 
*bnx2x_cnic_probe(struct net_device *dev)  {  	struct bnx2x *bp = netdev_priv(dev);  	struct cnic_eth_dev *cp = &bp->cnic_eth_dev; @@ -13766,7 +14131,7 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)  	return cp;  } -u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) +static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)  {  	struct bnx2x *bp = fp->bp;  	u32 offset = BAR_USTRORM_INTMEM; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 5ecf267dc4c..2beb5430b87 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2864,6 +2864,17 @@  #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ		 0x9430  #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE		 0x9434  #define PGLUE_B_REG_INTERNAL_VFID_ENABLE			 0x9438 +/* [W 7] Writing 1 to each bit in this register clears a corresponding error + * details register and enables logging new error details. Bit 0 - clears + * INCORRECT_RCV_DETAILS; Bit 1 - clears RX_ERR_DETAILS; Bit 2 - clears + * TX_ERR_WR_ADD_31_0 TX_ERR_WR_ADD_63_32 TX_ERR_WR_DETAILS + * TX_ERR_WR_DETAILS2 TX_ERR_RD_ADD_31_0 TX_ERR_RD_ADD_63_32 + * TX_ERR_RD_DETAILS TX_ERR_RD_DETAILS2 TX_ERR_WR_DETAILS_ICPL; Bit 3 - + * clears VF_LENGTH_VIOLATION_DETAILS. Bit 4 - clears + * VF_GRC_SPACE_VIOLATION_DETAILS. Bit 5 - clears RX_TCPL_ERR_DETAILS. Bit 6 + * - clears TCPL_IN_TWO_RCBS_DETAILS. */ +#define PGLUE_B_REG_LATCHED_ERRORS_CLR				 0x943c +  /* [R 9] Interrupt register #0 read */  #define PGLUE_B_REG_PGLUE_B_INT_STS				 0x9298  /* [RC 9] Interrupt register #0 read clear */ @@ -5921,6 +5932,7 @@  #define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)  #define MISC_REGISTERS_RESET_REG_1_RST_PXP			 (0x1<<26)  #define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27) +#define MISC_REGISTERS_RESET_REG_1_RST_XSEM			 (0x1<<22)  #define MISC_REGISTERS_RESET_REG_1_SET				 0x584  #define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598  #define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24) @@ -7168,6 +7180,7 @@ Theotherbitsarereservedandshouldbezero*/  #define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca  #define MDIO_WC_REG_RX2_PCI_CTRL			0x80da  #define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea +#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI		0x80fa  #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104  #define MDIO_WC_REG_XGXS_STATUS3			0x8129  #define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 9fbeee522d2..b1936044767 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -12,7 +12,7 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Vladislav Zolotarov   *   */ @@ -258,16 +258,16 @@ static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)  static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)  { -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(o->state, o->pstate); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  }  static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)  { -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	set_bit(o->state, o->pstate); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  }  /** @@ -355,23 +355,6 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)  	return vp->get(vp, 1);  } - -static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ -	struct bnx2x_credit_pool_obj *mp = o->macs_pool; -	struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - -	if (!mp->get(mp, 1)) -		return false; - -	if (!vp->get(vp, 1)) { -		mp->put(mp, 1); -		return false; -	} - -	return true; -} -  static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)  {  	struct bnx2x_credit_pool_obj *mp = o->macs_pool; @@ -400,22 +383,6 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)  	return vp->put(vp, 1);  } -static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) -{ -	struct bnx2x_credit_pool_obj *mp = o->macs_pool; -	struct bnx2x_credit_pool_obj *vp = o->vlans_pool; - -	if (!mp->put(mp, 1)) -		return false; - -	if (!vp->put(vp, 1)) { -		mp->get(mp, 1); -		return false; -	} - -	return true; -} -  /**   * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock   * @@ -507,22 +474,6 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,  	}  } -/** - * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock - * - * @bp:			device handle - * @o:			vlan_mac object - * - * @details Notice if a pending execution exists, it would perform it - - *          possibly releasing and reclaiming the execution queue lock. 
- */ -void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, -				   struct bnx2x_vlan_mac_obj *o) -{ -	spin_lock_bh(&o->exe_queue.lock); -	__bnx2x_vlan_mac_h_write_unlock(bp, o); -	spin_unlock_bh(&o->exe_queue.lock); -}  /**   * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock @@ -663,7 +614,7 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,  	/* Check if a requested MAC already exists */  	list_for_each_entry(pos, &o->head, link) -		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) && +		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&  		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))  			return -EEXIST; @@ -685,26 +636,6 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,  	return 0;  } -static int bnx2x_check_vlan_mac_add(struct bnx2x *bp, -				    struct bnx2x_vlan_mac_obj *o, -				   union bnx2x_classification_ramrod_data *data) -{ -	struct bnx2x_vlan_mac_registry_elem *pos; - -	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n", -	   data->vlan_mac.mac, data->vlan_mac.vlan); - -	list_for_each_entry(pos, &o->head, link) -		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && -		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, -				  ETH_ALEN)) && -		    (data->vlan_mac.is_inner_mac == -		     pos->u.vlan_mac.is_inner_mac)) -			return -EEXIST; - -	return 0; -} -  /* check_del() callbacks */  static struct bnx2x_vlan_mac_registry_elem *  	bnx2x_check_mac_del(struct bnx2x *bp, @@ -716,7 +647,7 @@ static struct bnx2x_vlan_mac_registry_elem *  	DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);  	list_for_each_entry(pos, &o->head, link) -		if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) && +		if (ether_addr_equal(data->mac.mac, pos->u.mac.mac) &&  		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))  			return pos; @@ -739,27 +670,6 @@ static struct bnx2x_vlan_mac_registry_elem *  	return NULL;  } -static struct bnx2x_vlan_mac_registry_elem * -	bnx2x_check_vlan_mac_del(struct bnx2x *bp, -				 struct bnx2x_vlan_mac_obj *o, -				 union bnx2x_classification_ramrod_data *data) -{ -	struct bnx2x_vlan_mac_registry_elem *pos; - -	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n", -	   data->vlan_mac.mac, data->vlan_mac.vlan); - -	list_for_each_entry(pos, &o->head, link) -		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && -		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, -			     ETH_ALEN)) && -		    (data->vlan_mac.is_inner_mac == -		     pos->u.vlan_mac.is_inner_mac)) -			return pos; - -	return NULL; -} -  /* check_move() callback */  static bool bnx2x_check_move(struct bnx2x *bp,  			     struct bnx2x_vlan_mac_obj *src_o, @@ -811,8 +721,8 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)  	return rx_tx_flag;  } -void bnx2x_set_mac_in_nig(struct bnx2x *bp, -			  bool add, unsigned char *dev_addr, int index) +static void bnx2x_set_mac_in_nig(struct bnx2x *bp, +				 bool add, unsigned char *dev_addr, int index)  {  	u32 wb_data[2];  	u32 reg_offset = BP_PORT(bp) ? 
NIG_REG_LLH1_FUNC_MEM : @@ -1126,100 +1036,6 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,  					rule_cnt);  } -static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, -				      struct bnx2x_vlan_mac_obj *o, -				      struct bnx2x_exeq_elem *elem, -				      int rule_idx, int cam_offset) -{ -	struct bnx2x_raw_obj *raw = &o->raw; -	struct eth_classify_rules_ramrod_data *data = -		(struct eth_classify_rules_ramrod_data *)(raw->rdata); -	int rule_cnt = rule_idx + 1; -	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; -	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; -	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; -	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; -	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; - -	/* Reset the ramrod data buffer for the first rule */ -	if (rule_idx == 0) -		memset(data, 0, sizeof(*data)); - -	/* Set a rule header */ -	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, -				      &rule_entry->pair.header); - -	/* Set VLAN and MAC themselves */ -	rule_entry->pair.vlan = cpu_to_le16(vlan); -	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, -			      &rule_entry->pair.mac_mid, -			      &rule_entry->pair.mac_lsb, mac); -	rule_entry->pair.inner_mac = -		cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac); -	/* MOVE: Add a rule that will add this MAC to the target Queue */ -	if (cmd == BNX2X_VLAN_MAC_MOVE) { -		rule_entry++; -		rule_cnt++; - -		/* Setup ramrod data */ -		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, -					elem->cmd_data.vlan_mac.target_obj, -					      true, CLASSIFY_RULE_OPCODE_PAIR, -					      &rule_entry->pair.header); - -		/* Set a VLAN itself */ -		rule_entry->pair.vlan = cpu_to_le16(vlan); -		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, -				      &rule_entry->pair.mac_mid, -				      &rule_entry->pair.mac_lsb, mac); -		rule_entry->pair.inner_mac = -			cpu_to_le16(elem->cmd_data.vlan_mac.u. -						vlan_mac.is_inner_mac); -	} - -	/* Set the ramrod data header */ -	/* TODO: take this to the higher level in order to prevent multiple -		 writing */ -	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, -					rule_cnt); -} - -/** - * bnx2x_set_one_vlan_mac_e1h - - * - * @bp:		device handle - * @o:		bnx2x_vlan_mac_obj - * @elem:	bnx2x_exeq_elem - * @rule_idx:	rule_idx - * @cam_offset:	cam_offset - */ -static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, -				       struct bnx2x_vlan_mac_obj *o, -				       struct bnx2x_exeq_elem *elem, -				       int rule_idx, int cam_offset) -{ -	struct bnx2x_raw_obj *raw = &o->raw; -	struct mac_configuration_cmd *config = -		(struct mac_configuration_cmd *)(raw->rdata); -	/* 57710 and 57711 do not support MOVE command, -	 * so it's either ADD or DEL -	 */ -	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
-		true : false; - -	/* Reset the ramrod data buffer */ -	memset(config, 0, sizeof(*config)); - -	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, -				     cam_offset, add, -				     elem->cmd_data.vlan_mac.u.vlan_mac.mac, -				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan, -				     ETH_VLAN_FILTER_CLASSIFY, config); -} - -#define list_next_entry(pos, member) \ -	list_entry((pos)->member.next, typeof(*(pos)), member) -  /**   * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element   * @@ -1319,24 +1135,6 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(  	return NULL;  } -static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( -	struct bnx2x_exe_queue_obj *o, -	struct bnx2x_exeq_elem *elem) -{ -	struct bnx2x_exeq_elem *pos; -	struct bnx2x_vlan_mac_ramrod_data *data = -		&elem->cmd_data.vlan_mac.u.vlan_mac; - -	/* Check pending for execution commands */ -	list_for_each_entry(pos, &o->exe_queue, link) -		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, -			      sizeof(*data)) && -		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) -			return pos; - -	return NULL; -} -  /**   * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed   * @@ -2041,6 +1839,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,  	struct bnx2x_vlan_mac_ramrod_params p;  	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;  	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; +	unsigned long flags;  	int read_lock;  	int rc = 0; @@ -2049,8 +1848,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,  	spin_lock_bh(&exeq->lock);  	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { -		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == -		    *vlan_mac_flags) { +		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags; +		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == +		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {  			rc = exeq->remove(bp, exeq->owner, exeq_pos);  			if (rc) {  				BNX2X_ERR("Failed to remove command\n"); @@ -2083,7 +1883,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,  		return read_lock;  	list_for_each_entry(pos, &o->head, link) { -		if (pos->vlan_mac_flags == *vlan_mac_flags) { +		flags = pos->vlan_mac_flags; +		if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) == +		    BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {  			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;  			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));  			rc = bnx2x_config_vlan_mac(bp, &p); @@ -2240,69 +2042,6 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,  	}  } -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, -			     struct bnx2x_vlan_mac_obj *vlan_mac_obj, -			     u8 cl_id, u32 cid, u8 func_id, void *rdata, -			     dma_addr_t rdata_mapping, int state, -			     unsigned long *pstate, bnx2x_obj_type type, -			     struct bnx2x_credit_pool_obj *macs_pool, -			     struct bnx2x_credit_pool_obj *vlans_pool) -{ -	union bnx2x_qable_obj *qable_obj = -		(union bnx2x_qable_obj *)vlan_mac_obj; - -	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, -				   rdata_mapping, state, pstate, type, -				   macs_pool, vlans_pool); - -	/* CAM pool handling */ -	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; -	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; -	/* CAM offset is relevant for 57710 and 57711 chips only which have a -	 * single CAM for both MACs and VLAN-MAC pairs. So the offset -	 * will be taken from MACs' pool object only. 
-	 */ -	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; -	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; - -	if (CHIP_IS_E1(bp)) { -		BNX2X_ERR("Do not support chips others than E2\n"); -		BUG(); -	} else if (CHIP_IS_E1H(bp)) { -		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h; -		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del; -		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add; -		vlan_mac_obj->check_move        = bnx2x_check_move_always_err; -		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC; - -		/* Exe Queue */ -		bnx2x_exe_queue_init(bp, -				     &vlan_mac_obj->exe_queue, 1, qable_obj, -				     bnx2x_validate_vlan_mac, -				     bnx2x_remove_vlan_mac, -				     bnx2x_optimize_vlan_mac, -				     bnx2x_execute_vlan_mac, -				     bnx2x_exeq_get_vlan_mac); -	} else { -		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2; -		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del; -		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add; -		vlan_mac_obj->check_move        = bnx2x_check_move; -		vlan_mac_obj->ramrod_cmd        = -			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; - -		/* Exe Queue */ -		bnx2x_exe_queue_init(bp, -				     &vlan_mac_obj->exe_queue, -				     CLASSIFY_RULES_COUNT, -				     qable_obj, bnx2x_validate_vlan_mac, -				     bnx2x_remove_vlan_mac, -				     bnx2x_optimize_vlan_mac, -				     bnx2x_execute_vlan_mac, -				     bnx2x_exeq_get_vlan_mac); -	} -} -  /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */  static inline void __storm_memset_mac_filters(struct bnx2x *bp,  			struct tstorm_eth_mac_filter_config *mac_filters, @@ -2392,7 +2131,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,  	/* The operation is completed */  	clear_bit(p->state, p->pstate); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	return 0;  } @@ -2538,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,  			 data->header.rule_cnt, p->rx_accept_flags,  			 p->tx_accept_flags); -	/* No need for an explicit memory barrier here as long we would -	 * need to ensure the ordering of writing to the SPQ element +	/* No need for an explicit memory barrier here as long as we +	 * ensure the ordering of writing to the SPQ element  	 * and updating of the SPQ producer which involves a memory -	 * read and we will have to put a full memory barrier there -	 * (inside bnx2x_sp_post()). +	 * read. If the memory read is removed we will have to put a +	 * full memory barrier there (inside bnx2x_sp_post()).  	 */  	/* Send a ramrod */ @@ -3243,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,  		raw->clear_pending(raw);  		return 0;  	} else { -		/* No need for an explicit memory barrier here as long we would -		 * need to ensure the ordering of writing to the SPQ element +		/* No need for an explicit memory barrier here as long as we +		 * ensure the ordering of writing to the SPQ element  		 * and updating of the SPQ producer which involves a memory -		 * read and we will have to put a full memory barrier there -		 * (inside bnx2x_sp_post()). +		 * read. If the memory read is removed we will have to put a +		 * full memory barrier there (inside bnx2x_sp_post()).  		 
*/  		/* Send a ramrod */ @@ -3727,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,  		raw->clear_pending(raw);  		return 0;  	} else { -		/* No need for an explicit memory barrier here as long we would -		 * need to ensure the ordering of writing to the SPQ element +		/* No need for an explicit memory barrier here as long as we +		 * ensure the ordering of writing to the SPQ element  		 * and updating of the SPQ producer which involves a memory -		 * read and we will have to put a full memory barrier there -		 * (inside bnx2x_sp_post()). +		 * read. If the memory read is removed we will have to put a +		 * full memory barrier there (inside bnx2x_sp_post()).  		 */  		/* Send a ramrod */ @@ -3837,16 +3576,16 @@ error_exit1:  static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)  { -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(o->sched_state, o->raw.pstate); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  }  static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)  { -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	set_bit(o->sched_state, o->raw.pstate); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  }  static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) @@ -4352,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,  		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;  	} -	/* No need for an explicit memory barrier here as long we would -	 * need to ensure the ordering of writing to the SPQ element +	/* No need for an explicit memory barrier here as long as we +	 * ensure the ordering of writing to the SPQ element  	 * and updating of the SPQ producer which involves a memory -	 * read and we will have to put a full memory barrier there -	 * (inside bnx2x_sp_post()). +	 * read. If the memory read is removed we will have to put a +	 * full memory barrier there (inside bnx2x_sp_post()).  	 
*/  	/* Send a ramrod */ @@ -4385,8 +4124,11 @@ int bnx2x_config_rss(struct bnx2x *bp,  	struct bnx2x_raw_obj *r = &o->raw;  	/* Do nothing if only driver cleanup was requested */ -	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) +	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { +		DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n", +		   p->ramrod_flags);  		return 0; +	}  	r->set_pending(r); @@ -4416,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,  	rss_obj->config_rss = bnx2x_setup_rss;  } -int validate_vlan_mac(struct bnx2x *bp, -		      struct bnx2x_vlan_mac_obj *vlan_mac) -{ -	if (!vlan_mac->get_n_elements) { -		BNX2X_ERR("vlan mac object was not intialized\n"); -		return -EINVAL; -	} -	return 0; -} -  /********************** Queue state object ***********************************/  /** @@ -4468,7 +4200,7 @@ int bnx2x_queue_state_change(struct bnx2x *bp,  		if (rc) {  			o->next_state = BNX2X_Q_STATE_MAX;  			clear_bit(pending_bit, pending); -			smp_mb__after_clear_bit(); +			smp_mb__after_atomic();  			return rc;  		} @@ -4556,7 +4288,7 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,  	wmb();  	clear_bit(cmd, &o->pending); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	return 0;  } @@ -4845,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,  	/* Fill the ramrod data */  	bnx2x_q_fill_setup_data_cmn(bp, params, rdata); -	/* No need for an explicit memory barrier here as long we would -	 * need to ensure the ordering of writing to the SPQ element +	/* No need for an explicit memory barrier here as long as we +	 * ensure the ordering of writing to the SPQ element  	 * and updating of the SPQ producer which involves a memory -	 * read and we will have to put a full memory barrier there -	 * (inside bnx2x_sp_post()). +	 * read. If the memory read is removed we will have to put a +	 * full memory barrier there (inside bnx2x_sp_post()).  	 */ -  	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],  			     U64_HI(data_mapping),  			     U64_LO(data_mapping), ETH_CONNECTION_TYPE); @@ -4873,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,  	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);  	bnx2x_q_fill_setup_data_e2(bp, params, rdata); -	/* No need for an explicit memory barrier here as long we would -	 * need to ensure the ordering of writing to the SPQ element +	/* No need for an explicit memory barrier here as long as we +	 * ensure the ordering of writing to the SPQ element  	 * and updating of the SPQ producer which involves a memory -	 * read and we will have to put a full memory barrier there -	 * (inside bnx2x_sp_post()). +	 * read. If the memory read is removed we will have to put a +	 * full memory barrier there (inside bnx2x_sp_post()).  	 
 	 */
-
 	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4917,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
 			 o->cids[cid_index], rdata->general.client_id,
 			 rdata->general.sp_client_id, rdata->general.cos);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
-
 	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
 			     U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4986,6 +4715,13 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp,
 		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
 	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
 	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+
+	/* tx switching */
+	data->tx_switching_flg =
+		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING, &params->update_flags);
+	data->tx_switching_change_flg =
+		test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
+			 &params->update_flags);
 }
 
 static inline int bnx2x_q_send_update(struct bnx2x *bp,
@@ -5011,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
 	/* Fill the ramrod data */
 	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
 
-	/* No need for an explicit memory barrier here as long we would
-	 * need to ensure the ordering of writing to the SPQ element
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
-	 * read and we will have to put a full memory barrier there
-	 * (inside bnx2x_sp_post()).
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
 	 */
-
 	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
 			     o->cids[cid_index], U64_HI(data_mapping),
 			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -5064,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
 	return bnx2x_q_send_update(bp, params);
 }
 
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *obj,
+				struct bnx2x_queue_update_tpa_params *params,
+				struct tpa_update_ramrod_data *data)
+{
+	data->client_id = obj->cl_id;
+	data->complete_on_both_clients = params->complete_on_both_clients;
+	data->dont_verify_rings_pause_thr_flg =
+		params->dont_verify_thr;
+	data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+	data->max_sges_for_packet = params->max_sges_pkt;
+	data->max_tpa_queues = params->max_tpa_queues;
+	data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+	data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+	data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+	data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+	data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+	data->tpa_mode = params->tpa_mode;
+	data->update_ipv4 = params->update_ipv4;
+	data->update_ipv6 = params->update_ipv6;
+}
+
 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
 					struct bnx2x_queue_state_params *params)
 {
-	/* TODO: Not implemented yet. */
-	return -1;
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct tpa_update_ramrod_data *rdata =
+		(struct tpa_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_queue_update_tpa_params *update_tpa_params =
+		&params->params.update_tpa;
+	u16 type;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+	/* Add the function id inside the type, so that sp post function
+	 * doesn't automatically add the PF func-id, this is required
+	 * for operations done by PFs on behalf of their VFs
+	 */
+	type = ETH_CONNECTION_TYPE |
+		((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), type);
 }
 
 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5493,7 +5279,7 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
 	wmb();
 
 	clear_bit(cmd, &o->pending);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	return 0;
 }
@@ -5898,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
 	rdata->tx_switch_suspend = switch_update_params->suspend;
 	rdata->echo = SWITCH_UPDATE;
 
+	/* No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element
+	 * and updating of the SPQ producer which involves a memory
+	 * read. If the memory read is removed we will have to put a
+	 * full memory barrier there (inside bnx2x_sp_post()).
+	 */  	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,  			     U64_HI(data_mapping),  			     U64_LO(data_mapping), NONE_CONNECTION_TYPE); @@ -5925,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,  	rdata->allowed_priorities = afex_update_params->allowed_priorities;  	rdata->echo = AFEX_UPDATE; -	/*  No need for an explicit memory barrier here as long we would -	 *  need to ensure the ordering of writing to the SPQ element -	 *  and updating of the SPQ producer which involves a memory -	 *  read and we will have to put a full memory barrier there -	 *  (inside bnx2x_sp_post()). +	/* No need for an explicit memory barrier here as long as we +	 * ensure the ordering of writing to the SPQ element +	 * and updating of the SPQ producer which involves a memory +	 * read. If the memory read is removed we will have to put a +	 * full memory barrier there (inside bnx2x_sp_post()).  	 */  	DP(BNX2X_MSG_SP,  	   "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", @@ -6014,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,  		rdata->traffic_type_to_priority_cos[i] =  			tx_start_params->traffic_type_to_priority_cos[i]; +	/* No need for an explicit memory barrier here as long as we +	 * ensure the ordering of writing to the SPQ element +	 * and updating of the SPQ producer which involves a memory +	 * read. If the memory read is removed we will have to put a +	 * full memory barrier there (inside bnx2x_sp_post()). +	 */  	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,  			     U64_HI(data_mapping),  			     U64_LO(data_mapping), NONE_CONNECTION_TYPE); @@ -6128,7 +5926,7 @@ int bnx2x_func_state_change(struct bnx2x *bp,  		if (rc) {  			o->next_state = BNX2X_F_STATE_MAX;  			clear_bit(cmd, pending); -			smp_mb__after_clear_bit(); +			smp_mb__after_atomic();  			return rc;  		} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 658f4e33abf..718ecd29466 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -12,7 +12,7 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Vladislav Zolotarov   *   */ @@ -266,6 +266,13 @@ enum {  	BNX2X_DONT_CONSUME_CAM_CREDIT,  	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,  }; +/* When looking for matching filters, some flags are not interesting */ +#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \ +				 1 << BNX2X_ETH_MAC | \ +				 1 << BNX2X_ISCSI_ETH_MAC | \ +				 1 << BNX2X_NETQ_ETH_MAC) +#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \ +	((flags) & BNX2X_VLAN_MAC_CMP_MASK)  struct bnx2x_vlan_mac_ramrod_params {  	/* Object to run the command from */ @@ -441,9 +448,6 @@ enum {  	BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2  }; -void bnx2x_set_mac_in_nig(struct bnx2x *bp, -			  bool add, unsigned char *dev_addr, int index); -  /** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */  /* RX_MODE ramrod special flags: set in rx_mode_flags field in @@ -763,7 +767,9 @@ enum {  	BNX2X_Q_UPDATE_DEF_VLAN_EN,  	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,  	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, -	BNX2X_Q_UPDATE_SILENT_VLAN_REM +	BNX2X_Q_UPDATE_SILENT_VLAN_REM, +	BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, +	BNX2X_Q_UPDATE_TX_SWITCHING  };  /* Allowed Queue states */ @@ -887,6 +893,24 @@ struct bnx2x_queue_update_params {  	u8		cid_index;  }; +struct bnx2x_queue_update_tpa_params { +	dma_addr_t sge_map; +	u8 update_ipv4; +	u8 update_ipv6; +	u8 max_tpa_queues; +	u8 max_sges_pkt; +	u8 complete_on_both_clients; +	u8 dont_verify_thr; +	u8 tpa_mode; +	u8 _pad; + +	u16 sge_buff_sz; +	u16 max_agg_sz; + +	u16 sge_pause_thr_low; +	u16 sge_pause_thr_high; +}; +  struct rxq_pause_params {  	u16		bd_th_lo;  	u16		bd_th_hi; @@ -981,6 +1005,7 @@ struct bnx2x_queue_state_params {  	/* Params according to the current command */  	union {  		struct bnx2x_queue_update_params	update; +		struct bnx2x_queue_update_tpa_params    update_tpa;  		struct bnx2x_queue_setup_params		setup;  		struct bnx2x_queue_init_params		init;  		struct bnx2x_queue_setup_tx_only_params	tx_only; @@ -1300,22 +1325,12 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,  			 unsigned long *pstate, bnx2x_obj_type type,  			 struct bnx2x_credit_pool_obj *vlans_pool); -void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, -			     struct bnx2x_vlan_mac_obj *vlan_mac_obj, -			     u8 cl_id, u32 cid, u8 func_id, void *rdata, -			     dma_addr_t rdata_mapping, int state, -			     unsigned long *pstate, bnx2x_obj_type type, -			     struct bnx2x_credit_pool_obj *macs_pool, -			     struct bnx2x_credit_pool_obj *vlans_pool); -  int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,  					struct bnx2x_vlan_mac_obj *o);  void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,  				  struct bnx2x_vlan_mac_obj *o);  int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,  				struct bnx2x_vlan_mac_obj *o); -void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp, -					  struct bnx2x_vlan_mac_obj *o);  int bnx2x_config_vlan_mac(struct bnx2x *bp,  			   struct bnx2x_vlan_mac_ramrod_params *p); @@ -1407,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,  void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,  			     u8 *ind_table); -int validate_vlan_mac(struct bnx2x *bp, -		      struct bnx2x_vlan_mac_obj *vlan_mac);  #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 9ad012bdd91..eda8583f6fc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ 
b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -12,9 +12,9 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Shmulik Ravid <shmulikr@broadcom.com> - *	       Ariel Elior <ariele@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Shmulik Ravid + *	       Ariel Elior <ariel.elior@qlogic.com>   *   */  #include "bnx2x.h" @@ -102,81 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,  	mmiowb();  	barrier();  } -/* VFOP - VF slow-path operation support */ -#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000 +static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, +				       struct bnx2x_virtf *vf, +				       bool print_err) +{ +	if (!bnx2x_leading_vfq(vf, sp_initialized)) { +		if (print_err) +			BNX2X_ERR("Slowpath objects not yet initialized!\n"); +		else +			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); +		return false; +	} +	return true; +}  /* VFOP operations states */ -enum bnx2x_vfop_qctor_state { -	   BNX2X_VFOP_QCTOR_INIT, -	   BNX2X_VFOP_QCTOR_SETUP, -	   BNX2X_VFOP_QCTOR_INT_EN -}; - -enum bnx2x_vfop_qdtor_state { -	   BNX2X_VFOP_QDTOR_HALT, -	   BNX2X_VFOP_QDTOR_TERMINATE, -	   BNX2X_VFOP_QDTOR_CFCDEL, -	   BNX2X_VFOP_QDTOR_DONE -}; - -enum bnx2x_vfop_vlan_mac_state { -	   BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, -	   BNX2X_VFOP_VLAN_MAC_CLEAR, -	   BNX2X_VFOP_VLAN_MAC_CHK_DONE, -	   BNX2X_VFOP_MAC_CONFIG_LIST, -	   BNX2X_VFOP_VLAN_CONFIG_LIST, -	   BNX2X_VFOP_VLAN_CONFIG_LIST_0 -}; - -enum bnx2x_vfop_qsetup_state { -	   BNX2X_VFOP_QSETUP_CTOR, -	   BNX2X_VFOP_QSETUP_VLAN0, -	   BNX2X_VFOP_QSETUP_DONE -}; - -enum bnx2x_vfop_mcast_state { -	   BNX2X_VFOP_MCAST_DEL, -	   BNX2X_VFOP_MCAST_ADD, -	   BNX2X_VFOP_MCAST_CHK_DONE -}; -enum bnx2x_vfop_qflr_state { -	   BNX2X_VFOP_QFLR_CLR_VLAN, -	   BNX2X_VFOP_QFLR_CLR_MAC, -	   BNX2X_VFOP_QFLR_TERMINATE, -	   BNX2X_VFOP_QFLR_DONE -}; - -enum bnx2x_vfop_flr_state { -	   BNX2X_VFOP_FLR_QUEUES, -	   BNX2X_VFOP_FLR_HW -}; - -enum bnx2x_vfop_close_state { -	   BNX2X_VFOP_CLOSE_QUEUES, -	   BNX2X_VFOP_CLOSE_HW -}; - -enum bnx2x_vfop_rxmode_state { -	   BNX2X_VFOP_RXMODE_CONFIG, -	   BNX2X_VFOP_RXMODE_DONE -}; - -enum bnx2x_vfop_qteardown_state { -	   BNX2X_VFOP_QTEARDOWN_RXMODE, -	   BNX2X_VFOP_QTEARDOWN_CLR_VLAN, -	   BNX2X_VFOP_QTEARDOWN_CLR_MAC, -	   BNX2X_VFOP_QTEARDOWN_QDTOR, -	   BNX2X_VFOP_QTEARDOWN_DONE -}; - -enum bnx2x_vfop_rss_state { -	   BNX2X_VFOP_RSS_CONFIG, -	   BNX2X_VFOP_RSS_DONE -}; - -#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0) -  void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,  			      struct bnx2x_queue_init_params *init_params,  			      struct bnx2x_queue_setup_params *setup_params, @@ -220,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,  void bnx2x_vfop_qctor_prep(struct bnx2x *bp,  			   struct bnx2x_virtf *vf,  			   struct bnx2x_vf_queue *q, -			   struct bnx2x_vfop_qctor_params *p, +			   struct bnx2x_vf_queue_construct_params *p,  			   unsigned long q_type)  {  	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; @@ -289,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,  	}  } -/* VFOP queue construction */ -static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_create(struct bnx2x *bp, +				 struct bnx2x_virtf *vf, int qid, +				 struct bnx2x_vf_queue_construct_params 
*qctor)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; -	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; -	enum bnx2x_vfop_qctor_state state = vfop->state; - -	bnx2x_vfop_reset_wq(vf); - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	switch (state) { -	case BNX2X_VFOP_QCTOR_INIT: - -		/* has this queue already been opened? */ -		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == -		    BNX2X_Q_LOGICAL_STATE_ACTIVE) { -			DP(BNX2X_MSG_IOV, -			   "Entered qctor but queue was already up. Aborting gracefully\n"); -			goto op_done; -		} - -		/* next state */ -		vfop->state = BNX2X_VFOP_QCTOR_SETUP; - -		q_params->cmd = BNX2X_Q_CMD_INIT; -		vfop->rc = bnx2x_queue_state_change(bp, q_params); - -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - -	case BNX2X_VFOP_QCTOR_SETUP: -		/* next state */ -		vfop->state = BNX2X_VFOP_QCTOR_INT_EN; - -		/* copy pre-prepared setup params to the queue-state params */ -		vfop->op_p->qctor.qstate.params.setup = -			vfop->op_p->qctor.prep_qsetup; - -		q_params->cmd = BNX2X_Q_CMD_SETUP; -		vfop->rc = bnx2x_queue_state_change(bp, q_params); +	struct bnx2x_queue_state_params *q_params; +	int rc = 0; -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); +	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); -	case BNX2X_VFOP_QCTOR_INT_EN: +	/* Prepare ramrod information */ +	q_params = &qctor->qstate; +	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); +	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); -		/* enable interrupts */ -		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), -				    USTORM_ID, 0, IGU_INT_ENABLE, 0); -		goto op_done; -	default: -		bnx2x_vfop_default(state); +	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == +	    BNX2X_Q_LOGICAL_STATE_ACTIVE) { +		DP(BNX2X_MSG_IOV, "queue was already up. 
Aborting gracefully\n"); +		goto out;  	} -op_err: -	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", -		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); -op_done: -	bnx2x_vfop_end(bp, vf, vfop); -op_pending: -	return; -} -static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, -				struct bnx2x_virtf *vf, -				struct bnx2x_vfop_cmd *cmd, -				int qid) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - -	if (vfop) { -		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); +	/* Run Queue 'construction' ramrods */ +	q_params->cmd = BNX2X_Q_CMD_INIT; +	rc = bnx2x_queue_state_change(bp, q_params); +	if (rc) +		goto out; -		vfop->args.qctor.qid = qid; -		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); +	memcpy(&q_params->params.setup, &qctor->prep_qsetup, +	       sizeof(struct bnx2x_queue_setup_params)); +	q_params->cmd = BNX2X_Q_CMD_SETUP; +	rc = bnx2x_queue_state_change(bp, q_params); +	if (rc) +		goto out; -		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, -				 bnx2x_vfop_qctor, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, -					     cmd->block); -	} -	return -ENOMEM; +	/* enable interrupts */ +	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), +			    USTORM_ID, 0, IGU_INT_ENABLE, 0); +out: +	return rc;  } -/* VFOP queue destruction */ -static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, +				  int qid)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; -	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; -	enum bnx2x_vfop_qdtor_state state = vfop->state; - -	bnx2x_vfop_reset_wq(vf); - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	switch (state) { -	case BNX2X_VFOP_QDTOR_HALT: - -		/* has this queue already been stopped? */ -		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == -		    BNX2X_Q_LOGICAL_STATE_STOPPED) { -			DP(BNX2X_MSG_IOV, -			   "Entered qdtor but queue was already stopped. Aborting gracefully\n"); - -			/* next state */ -			vfop->state = BNX2X_VFOP_QDTOR_DONE; - -			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); -		} - -		/* next state */ -		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE; +	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, +				       BNX2X_Q_CMD_TERMINATE, +				       BNX2X_Q_CMD_CFC_DEL}; +	struct bnx2x_queue_state_params q_params; +	int rc, i; -		q_params->cmd = BNX2X_Q_CMD_HALT; -		vfop->rc = bnx2x_queue_state_change(bp, q_params); +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); +	/* Prepare ramrod information */ +	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); +	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); +	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); -	case BNX2X_VFOP_QDTOR_TERMINATE: -		/* next state */ -		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL; - -		q_params->cmd = BNX2X_Q_CMD_TERMINATE; -		vfop->rc = bnx2x_queue_state_change(bp, q_params); - -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - -	case BNX2X_VFOP_QDTOR_CFCDEL: -		/* next state */ -		vfop->state = BNX2X_VFOP_QDTOR_DONE; - -		q_params->cmd = BNX2X_Q_CMD_CFC_DEL; -		vfop->rc = bnx2x_queue_state_change(bp, q_params); +	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == +	    BNX2X_Q_LOGICAL_STATE_STOPPED) { +		DP(BNX2X_MSG_IOV, "queue was already stopped. 
Aborting gracefully\n"); +		goto out; +	} -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: -	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", -		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); -op_done: -	case BNX2X_VFOP_QDTOR_DONE: -		/* invalidate the context */ -		if (qdtor->cxt) { -			qdtor->cxt->ustorm_ag_context.cdu_usage = 0; -			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; +	/* Run Queue 'destruction' ramrods */ +	for (i = 0; i < ARRAY_SIZE(cmds); i++) { +		q_params.cmd = cmds[i]; +		rc = bnx2x_queue_state_change(bp, &q_params); +		if (rc) { +			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); +			return rc;  		} -		bnx2x_vfop_end(bp, vf, vfop); -		return; -	default: -		bnx2x_vfop_default(state);  	} -op_pending: -	return; -} - -static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, -				struct bnx2x_virtf *vf, -				struct bnx2x_vfop_cmd *cmd, -				int qid) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - -	if (vfop) { -		struct bnx2x_queue_state_params *qstate = -			&vf->op_params.qctor.qstate; - -		memset(qstate, 0, sizeof(*qstate)); -		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); - -		vfop->args.qdtor.qid = qid; -		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt); - -		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT, -				 bnx2x_vfop_qdtor, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, -					     cmd->block); +out: +	/* Clean Context */ +	if (bnx2x_vfq(vf, qid, cxt)) { +		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; +		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;  	} -	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n", -	   vf->abs_vfid, vfop->rc); -	return -ENOMEM; + +	return 0;  }  static void @@ -495,861 +330,373 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)  	BP_VFDB(bp)->vf_sbs_pool++;  } -/* VFOP MAC/VLAN helpers */ -static inline void bnx2x_vfop_credit(struct bnx2x *bp, -				     struct bnx2x_vfop *vfop, -				     struct bnx2x_vlan_mac_obj *obj) -{ -	struct bnx2x_vfop_args_filters *args = &vfop->args.filters; - -	/* update credit only if there is no error -	 * and a valid credit counter -	 */ -	if (!vfop->rc && args->credit) { -		struct list_head *pos; -		int read_lock; -		int cnt = 0; - -		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); -		if (read_lock) -			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); - -		list_for_each(pos, &obj->head) -			cnt++; - -		if (!read_lock) -			bnx2x_vlan_mac_h_read_unlock(bp, obj); - -		atomic_set(args->credit, cnt); -	} -} - -static int bnx2x_vfop_set_user_req(struct bnx2x *bp, -				    struct bnx2x_vfop_filter *pos, -				    struct bnx2x_vlan_mac_data *user_req) -{ -	user_req->cmd = pos->add ? 
BNX2X_VLAN_MAC_ADD : -		BNX2X_VLAN_MAC_DEL; - -	switch (pos->type) { -	case BNX2X_VFOP_FILTER_MAC: -		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); -		break; -	case BNX2X_VFOP_FILTER_VLAN: -		user_req->u.vlan.vlan = pos->vid; -		break; -	default: -		BNX2X_ERR("Invalid filter type, skipping\n"); -		return 1; -	} -	return 0; -} - -static int bnx2x_vfop_config_list(struct bnx2x *bp, -				  struct bnx2x_vfop_filters *filters, -				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac) +static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, +					struct bnx2x_vlan_mac_obj *obj, +					atomic_t *counter)  { -	struct bnx2x_vfop_filter *pos, *tmp; -	struct list_head rollback_list, *filters_list = &filters->head; -	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; -	int rc = 0, cnt = 0; +	struct list_head *pos; +	int read_lock; +	int cnt = 0; -	INIT_LIST_HEAD(&rollback_list); +	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); +	if (read_lock) +		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); -	list_for_each_entry_safe(pos, tmp, filters_list, link) { -		if (bnx2x_vfop_set_user_req(bp, pos, user_req)) -			continue; +	list_for_each(pos, &obj->head) +		cnt++; -		rc = bnx2x_config_vlan_mac(bp, vlan_mac); -		if (rc >= 0) { -			cnt += pos->add ? 1 : -1; -			list_move(&pos->link, &rollback_list); -			rc = 0; -		} else if (rc == -EEXIST) { -			rc = 0; -		} else { -			BNX2X_ERR("Failed to add a new vlan_mac command\n"); -			break; -		} -	} +	if (!read_lock) +		bnx2x_vlan_mac_h_read_unlock(bp, obj); -	/* rollback if error or too many rules added */ -	if (rc || cnt > filters->add_cnt) { -		BNX2X_ERR("error or too many rules added. Performing rollback\n"); -		list_for_each_entry_safe(pos, tmp, &rollback_list, link) { -			pos->add = !pos->add;	/* reverse op */ -			bnx2x_vfop_set_user_req(bp, pos, user_req); -			bnx2x_config_vlan_mac(bp, vlan_mac); -			list_del(&pos->link); -		} -		cnt = 0; -		if (!rc) -			rc = -EINVAL; -	} -	filters->add_cnt = cnt; -	return rc; +	atomic_set(counter, cnt);  } -/* VFOP set VLAN/MAC */ -static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, +				   int qid, bool drv_only, bool mac)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; -	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; -	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; - -	enum bnx2x_vfop_vlan_mac_state state = vfop->state; - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	bnx2x_vfop_reset_wq(vf); - -	switch (state) { -	case BNX2X_VFOP_VLAN_MAC_CLEAR: -		/* next state */ -		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - -		/* do delete */ -		vfop->rc = obj->delete_all(bp, obj, -					   &vlan_mac->user_req.vlan_mac_flags, -					   &vlan_mac->ramrod_flags); - -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - -	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: -		/* next state */ -		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - -		/* do config */ -		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); -		if (vfop->rc == -EEXIST) -			vfop->rc = 0; - -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - -	case BNX2X_VFOP_VLAN_MAC_CHK_DONE: -		vfop->rc = !!obj->raw.check_pending(&obj->raw); -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - -	case BNX2X_VFOP_MAC_CONFIG_LIST: -		/* next state */ -		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - -		/* 
do list config */ -		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); -		if (vfop->rc) -			goto op_err; - -		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); -		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - -	case BNX2X_VFOP_VLAN_CONFIG_LIST: -		/* next state */ -		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; +	struct bnx2x_vlan_mac_ramrod_params ramrod; +	int rc; -		/* do list config */ -		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); -		if (!vfop->rc) { -			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); -			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); -		} -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); +	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, +	   mac ? "MACs" : "VLANs"); -	default: -		bnx2x_vfop_default(state); +	/* Prepare ramrod params */ +	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); +	if (mac) { +		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); +		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); +	} else { +		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, +			&ramrod.user_req.vlan_mac_flags); +		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); +	} +	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; + +	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); +	if (drv_only) +		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); +	else +		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); + +	/* Start deleting */ +	rc = ramrod.vlan_mac_obj->delete_all(bp, +					     ramrod.vlan_mac_obj, +					     &ramrod.user_req.vlan_mac_flags, +					     &ramrod.ramrod_flags); +	if (rc) { +		BNX2X_ERR("Failed to delete all %s\n", +			  mac ? "MACs" : "VLANs"); +		return rc;  	} -op_err: -	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); -op_done: -	kfree(filters); -	bnx2x_vfop_credit(bp, vfop, obj); -	bnx2x_vfop_end(bp, vf, vfop); -op_pending: -	return; -} - -struct bnx2x_vfop_vlan_mac_flags { -	bool drv_only; -	bool dont_consume; -	bool single_cmd; -	bool add; -}; - -static void -bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, -				struct bnx2x_vfop_vlan_mac_flags *flags) -{ -	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; - -	memset(ramrod, 0, sizeof(*ramrod)); - -	/* ramrod flags */ -	if (flags->drv_only) -		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); -	if (flags->single_cmd) -		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); -	/* mac_vlan flags */ -	if (flags->dont_consume) -		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); +	/* Clear the vlan counters */ +	if (!mac) +		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); -	/* cmd */ -	ureq->cmd = flags->add ? 
BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; -} - -static inline void -bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, -			   struct bnx2x_vfop_vlan_mac_flags *flags) -{ -	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); -	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); +	return 0;  } -static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, -				     struct bnx2x_virtf *vf, -				     struct bnx2x_vfop_cmd *cmd, -				     int qid, bool drv_only) +static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, +				    struct bnx2x_virtf *vf, int qid, +				    struct bnx2x_vf_mac_vlan_filter *filter, +				    bool drv_only)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); +	struct bnx2x_vlan_mac_ramrod_params ramrod;  	int rc; -	if (vfop) { -		struct bnx2x_vfop_args_filters filters = { -			.multi_filter = NULL,	/* single */ -			.credit = NULL,		/* consume credit */ -		}; -		struct bnx2x_vfop_vlan_mac_flags flags = { -			.drv_only = drv_only, -			.dont_consume = (filters.credit != NULL), -			.single_cmd = true, -			.add = false /* don't care */, -		}; -		struct bnx2x_vlan_mac_ramrod_params *ramrod = -			&vf->op_params.vlan_mac; - -		/* set ramrod params */ -		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - -		/* set object */ -		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); -		if (rc) -			return rc; -		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); - -		/* set extra args */ -		vfop->args.filters = filters; - -		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, -				 bnx2x_vfop_vlan_mac, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, -					     cmd->block); +	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", +	   vf->abs_vfid, filter->add ? "Adding" : "Deleting", +	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); + +	/* Prepare ramrod params */ +	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); +	if (filter->type == BNX2X_VF_FILTER_VLAN) { +		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, +			&ramrod.user_req.vlan_mac_flags); +		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); +		ramrod.user_req.u.vlan.vlan = filter->vid; +	} else { +		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); +		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); +		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); +	} +	ramrod.user_req.cmd = filter->add ? 
BNX2X_VLAN_MAC_ADD : +					    BNX2X_VLAN_MAC_DEL; + +	/* Verify there are available vlan credits */ +	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && +	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= +	     vf_vlan_rules_cnt(vf))) { +		BNX2X_ERR("No credits for vlan [%d >= %d]\n", +			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), +			  vf_vlan_rules_cnt(vf)); +		return -ENOMEM;  	} -	return -ENOMEM; -} -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, -			    struct bnx2x_virtf *vf, -			    struct bnx2x_vfop_cmd *cmd, -			    struct bnx2x_vfop_filters *macs, -			    int qid, bool drv_only) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	int rc; +	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); +	if (drv_only) +		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); +	else +		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); -	if (vfop) { -		struct bnx2x_vfop_args_filters filters = { -			.multi_filter = macs, -			.credit = NULL,		/* consume credit */ -		}; -		struct bnx2x_vfop_vlan_mac_flags flags = { -			.drv_only = drv_only, -			.dont_consume = (filters.credit != NULL), -			.single_cmd = false, -			.add = false, /* don't care since only the items in the -				       * filters list affect the sp operation, -				       * not the list itself -				       */ -		}; -		struct bnx2x_vlan_mac_ramrod_params *ramrod = -			&vf->op_params.vlan_mac; - -		/* set ramrod params */ -		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); - -		/* set object */ -		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); -		if (rc) -			return rc; -		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); +	/* Add/Remove the filter */ +	rc = bnx2x_config_vlan_mac(bp, &ramrod); +	if (rc && rc != -EEXIST) { +		BNX2X_ERR("Failed to %s %s\n", +			  filter->add ? "add" : "delete", +			  filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : +								"VLAN"); +		return rc; +	} -		/* set extra args */ -		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; -		vfop->args.filters = filters; +	/* Update the vlan counters */ +	if (filter->type == BNX2X_VF_FILTER_VLAN) +		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, +				     &bnx2x_vfq(vf, qid, vlan_count)); -		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, -				 bnx2x_vfop_vlan_mac, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, -					     cmd->block); -	} -	return -ENOMEM; +	return 0;  } -int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, -			    struct bnx2x_virtf *vf, -			    struct bnx2x_vfop_cmd *cmd, -			    int qid, u16 vid, bool add) +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, +				  struct bnx2x_vf_mac_vlan_filters *filters, +				  int qid, bool drv_only)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	int rc; +	int rc = 0, i; -	if (vfop) { -		struct bnx2x_vfop_args_filters filters = { -			.multi_filter = NULL, /* single command */ -			.credit = &bnx2x_vfq(vf, qid, vlan_count), -		}; -		struct bnx2x_vfop_vlan_mac_flags flags = { -			.drv_only = false, -			.dont_consume = (filters.credit != NULL), -			.single_cmd = true, -			.add = add, -		}; -		struct bnx2x_vlan_mac_ramrod_params *ramrod = -			&vf->op_params.vlan_mac; - -		/* set ramrod params */ -		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); -		ramrod->user_req.u.vlan.vlan = vid; - -		/* set object */ -		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); -		if (rc) -			return rc; -		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); -		/* set extra args */ -		vfop->args.filters = filters; +	if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) +		return -EINVAL; -		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, -				 bnx2x_vfop_vlan_mac, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, -					     cmd->block); +	/* Prepare ramrod params */ +	for (i = 0; i < filters->count; i++) { +		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, +					      &filters->filters[i], drv_only); +		if (rc) +			break;  	} -	return -ENOMEM; -} - -static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, -			       struct bnx2x_virtf *vf, -			       struct bnx2x_vfop_cmd *cmd, -			       int qid, bool drv_only) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	int rc; -	if (vfop) { -		struct bnx2x_vfop_args_filters filters = { -			.multi_filter = NULL, /* single command */ -			.credit = &bnx2x_vfq(vf, qid, vlan_count), -		}; -		struct bnx2x_vfop_vlan_mac_flags flags = { -			.drv_only = drv_only, -			.dont_consume = (filters.credit != NULL), -			.single_cmd = true, -			.add = false, /* don't care */ -		}; -		struct bnx2x_vlan_mac_ramrod_params *ramrod = -			&vf->op_params.vlan_mac; - -		/* set ramrod params */ -		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - -		/* set object */ -		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); -		if (rc) -			return rc; -		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); +	/* Rollback if needed */ +	if (i != filters->count) { +		BNX2X_ERR("Managed only %d/%d filters - rolling back\n", +			  i, filters->count + 1); +		while (--i >= 0) { +			filters->filters[i].add = !filters->filters[i].add; +			bnx2x_vf_mac_vlan_config(bp, vf, qid, +						 &filters->filters[i], +						 drv_only); +		} +	} -		/* set extra args */ -		vfop->args.filters = filters; +	/* It's our responsibility to free the filters */ +	kfree(filters); -		
bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, -				 bnx2x_vfop_vlan_mac, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, -					     cmd->block); -	} -	return -ENOMEM; +	return rc;  } -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, -			     struct bnx2x_virtf *vf, -			     struct bnx2x_vfop_cmd *cmd, -			     struct bnx2x_vfop_filters *vlans, -			     int qid, bool drv_only) +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, +			 struct bnx2x_vf_queue_construct_params *qctor)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);  	int rc; -	if (vfop) { -		struct bnx2x_vfop_args_filters filters = { -			.multi_filter = vlans, -			.credit = &bnx2x_vfq(vf, qid, vlan_count), -		}; -		struct bnx2x_vfop_vlan_mac_flags flags = { -			.drv_only = drv_only, -			.dont_consume = (filters.credit != NULL), -			.single_cmd = false, -			.add = false, /* don't care */ -		}; -		struct bnx2x_vlan_mac_ramrod_params *ramrod = -			&vf->op_params.vlan_mac; - -		/* set ramrod params */ -		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); - -		/* set object */ -		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); -		if (rc) -			return rc; -		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); - -		/* set extra args */ -		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) - -			atomic_read(filters.credit); - -		vfop->args.filters = filters; - -		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, -				 bnx2x_vfop_vlan_mac, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, -					     cmd->block); -	} -	return -ENOMEM; -} - -/* VFOP queue setup (queue constructor + set vlan 0) */ -static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	int qid = vfop->args.qctor.qid; -	enum bnx2x_vfop_qsetup_state state = vfop->state; -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vfop_qsetup, -		.block = false, -	}; +	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); -	if (vfop->rc < 0) +	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); +	if (rc)  		goto op_err; -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	switch (state) { -	case BNX2X_VFOP_QSETUP_CTOR: -		/* init the queue ctor command */ -		vfop->state = BNX2X_VFOP_QSETUP_VLAN0; -		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); -		if (vfop->rc) -			goto op_err; -		return; - -	case BNX2X_VFOP_QSETUP_VLAN0: -		/* skip if non-leading or FPGA/EMU*/ -		if (qid) -			goto op_done; +	/* Configure vlan0 for leading queue */ +	if (!qid) { +		struct bnx2x_vf_mac_vlan_filter filter; -		/* init the queue set-vlan command (for vlan 0) */ -		vfop->state = BNX2X_VFOP_QSETUP_DONE; -		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); -		if (vfop->rc) +		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); +		filter.type = BNX2X_VF_FILTER_VLAN; +		filter.add = true; +		filter.vid = 0; +		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); +		if (rc)  			goto op_err; -		return; -op_err: -	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); -op_done: -	case BNX2X_VFOP_QSETUP_DONE: -		vf->cfg_flags |= VF_CFG_VLAN; -		smp_mb__before_clear_bit(); -		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, -			&bp->sp_rtnl_state); -		smp_mb__after_clear_bit(); -		schedule_delayed_work(&bp->sp_rtnl_task, 0); -		bnx2x_vfop_end(bp, vf, vfop); -		return; -	default: -		bnx2x_vfop_default(state);  	} -} - -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, -			  struct bnx2x_virtf *vf, -			  struct 
bnx2x_vfop_cmd *cmd, -			  int qid) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - -	if (vfop) { -		vfop->args.qctor.qid = qid; -		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, -				 bnx2x_vfop_qsetup, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, -					     cmd->block); -	} -	return -ENOMEM; +	/* Schedule the configuration of any pending vlan filters */ +	vf->cfg_flags |= VF_CFG_VLAN; +	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, +			       BNX2X_MSG_IOV); +	return 0; +op_err: +	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); +	return rc;  } -/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ -static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, +			       int qid)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	int qid = vfop->args.qx.qid; -	enum bnx2x_vfop_qflr_state state = vfop->state; -	struct bnx2x_queue_state_params *qstate; -	struct bnx2x_vfop_cmd cmd; - -	bnx2x_vfop_reset_wq(vf); - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); +	int rc; -	cmd.done = bnx2x_vfop_qflr; -	cmd.block = false; +	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); -	switch (state) { -	case BNX2X_VFOP_QFLR_CLR_VLAN: -		/* vlan-clear-all: driver-only, don't consume credit */ -		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; -		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) -			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, -							      true); -		if (vfop->rc) +	/* If needed, clean the filtering data base */ +	if ((qid == LEADING_IDX) && +	    bnx2x_validate_vf_sp_objs(bp, vf, false)) { +		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); +		if (rc)  			goto op_err; -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - -	case BNX2X_VFOP_QFLR_CLR_MAC: -		/* mac-clear-all: driver only consume credit */ -		vfop->state = BNX2X_VFOP_QFLR_TERMINATE; -		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) -			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, -							     true); -		DP(BNX2X_MSG_IOV, -		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", -		   vf->abs_vfid, vfop->rc); -		if (vfop->rc) +		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); +		if (rc)  			goto op_err; -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - -	case BNX2X_VFOP_QFLR_TERMINATE: -		qstate = &vfop->op_p->qctor.qstate; -		memset(qstate , 0, sizeof(*qstate)); -		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); -		vfop->state = BNX2X_VFOP_QFLR_DONE; - -		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", -		   vf->abs_vfid, qstate->q_obj->state); - -		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { -			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; -			qstate->cmd = BNX2X_Q_CMD_TERMINATE; -			vfop->rc = bnx2x_queue_state_change(bp, qstate); -			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); -		} else { -			goto op_done; -		} - -op_err: -	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", -		  vf->abs_vfid, qid, vfop->rc); -op_done: -	case BNX2X_VFOP_QFLR_DONE: -		bnx2x_vfop_end(bp, vf, vfop); -		return; -	default: -		bnx2x_vfop_default(state);  	} -op_pending: -	return; -} -static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, -			       struct bnx2x_virtf *vf, -			       struct bnx2x_vfop_cmd *cmd, -			       int qid) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); +	/* Terminate queue */ +	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { +		
struct bnx2x_queue_state_params qstate; -	if (vfop) { -		vfop->args.qx.qid = qid; -		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, -				 bnx2x_vfop_qflr, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, -					     cmd->block); +		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); +		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); +		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; +		qstate.cmd = BNX2X_Q_CMD_TERMINATE; +		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); +		rc = bnx2x_queue_state_change(bp, &qstate); +		if (rc) +			goto op_err;  	} -	return -ENOMEM; -} - -/* VFOP multi-casts */ -static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; -	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; -	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; -	enum bnx2x_vfop_mcast_state state = vfop->state; -	int i; -	bnx2x_vfop_reset_wq(vf); - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	switch (state) { -	case BNX2X_VFOP_MCAST_DEL: -		/* clear existing mcasts */ -		vfop->state = BNX2X_VFOP_MCAST_ADD; -		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL); -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); - -	case BNX2X_VFOP_MCAST_ADD: -		if (raw->check_pending(raw)) -			goto op_pending; - -		if (args->mc_num) { -			/* update mcast list on the ramrod params */ -			INIT_LIST_HEAD(&mcast->mcast_list); -			for (i = 0; i < args->mc_num; i++) -				list_add_tail(&(args->mc[i].link), -					      &mcast->mcast_list); -			/* add new mcasts */ -			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; -			vfop->rc = bnx2x_config_mcast(bp, mcast, -						      BNX2X_MCAST_CMD_ADD); -		} -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); - -	case BNX2X_VFOP_MCAST_CHK_DONE: -		vfop->rc = raw->check_pending(raw) ? 1 : 0; -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -	default: -		bnx2x_vfop_default(state); -	} +	return 0;  op_err: -	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc); -op_done: -	kfree(args->mc); -	bnx2x_vfop_end(bp, vf, vfop); -op_pending: -	return; +	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); +	return rc;  } -int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, -			 struct bnx2x_virtf *vf, -			 struct bnx2x_vfop_cmd *cmd, -			 bnx2x_mac_addr_t *mcasts, -			 int mcast_num, bool drv_only) -{ -	struct bnx2x_vfop *vfop = NULL; -	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem); -	struct bnx2x_mcast_list_elem *mc = mc_sz ? 
kzalloc(mc_sz, GFP_KERNEL) : -					   NULL; - -	if (!mc_sz || mc) { -		vfop = bnx2x_vfop_add(bp, vf); -		if (vfop) { -			int i; -			struct bnx2x_mcast_ramrod_params *ramrod = -				&vf->op_params.mcast; - -			/* set ramrod params */ -			memset(ramrod, 0, sizeof(*ramrod)); -			ramrod->mcast_obj = &vf->mcast_obj; -			if (drv_only) -				set_bit(RAMROD_DRV_CLR_ONLY, -					&ramrod->ramrod_flags); - -			/* copy mcasts pointers */ -			vfop->args.mc_list.mc_num = mcast_num; -			vfop->args.mc_list.mc = mc; -			for (i = 0; i < mcast_num; i++) -				mc[i].mac = mcasts[i]; - -			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL, -					 bnx2x_vfop_mcast, cmd->done); -			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast, -						     cmd->block); -		} else { -			kfree(mc); +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, +		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) +{ +	struct bnx2x_mcast_list_elem *mc = NULL; +	struct bnx2x_mcast_ramrod_params mcast; +	int rc, i; + +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + +	/* Prepare Multicast command */ +	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); +	mcast.mcast_obj = &vf->mcast_obj; +	if (drv_only) +		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); +	else +		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); +	if (mc_num) { +		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), +			     GFP_KERNEL); +		if (!mc) { +			BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n"); +			return -ENOMEM;  		}  	} -	return -ENOMEM; -} - -/* VFOP rx-mode */ -static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode; -	enum bnx2x_vfop_rxmode_state state = vfop->state; - -	bnx2x_vfop_reset_wq(vf); - -	if (vfop->rc < 0) -		goto op_err; -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); +	/* clear existing mcasts */ +	mcast.mcast_list_len = vf->mcast_list_len; +	vf->mcast_list_len = mc_num; +	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); +	if (rc) { +		BNX2X_ERR("Failed to remove multicasts\n"); +		if (mc) +			kfree(mc); +		return rc; +	} -	switch (state) { -	case BNX2X_VFOP_RXMODE_CONFIG: -		/* next state */ -		vfop->state = BNX2X_VFOP_RXMODE_DONE; +	/* update mcast list on the ramrod params */ +	if (mc_num) { +		INIT_LIST_HEAD(&mcast.mcast_list); +		for (i = 0; i < mc_num; i++) { +			mc[i].mac = mcasts[i]; +			list_add_tail(&mc[i].link, +				      &mcast.mcast_list); +		} -		vfop->rc = bnx2x_config_rx_mode(bp, ramrod); -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: -		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc); -op_done: -	case BNX2X_VFOP_RXMODE_DONE: -		bnx2x_vfop_end(bp, vf, vfop); -		return; -	default: -		bnx2x_vfop_default(state); +		/* add new mcasts */ +		mcast.mcast_list_len = mc_num; +		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); +		if (rc) +			BNX2X_ERR("Faled to add multicasts\n"); +		kfree(mc);  	} -op_pending: -	return; + +	return rc;  } -int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, -			  struct bnx2x_virtf *vf, -			  struct bnx2x_vfop_cmd *cmd, -			  int qid, unsigned long accept_flags) +static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, +				  struct bnx2x_rx_mode_ramrod_params *ramrod, +				  struct bnx2x_virtf *vf, +				  unsigned long accept_flags)  {  	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - -	if (vfop) { -		struct bnx2x_rx_mode_ramrod_params *ramrod = -		
	&vf->op_params.rx_mode; -		memset(ramrod, 0, sizeof(*ramrod)); - -		/* Prepare ramrod parameters */ -		ramrod->cid = vfq->cid; -		ramrod->cl_id = vfq_cl_id(vf, vfq); -		ramrod->rx_mode_obj = &bp->rx_mode_obj; -		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); - -		ramrod->rx_accept_flags = accept_flags; -		ramrod->tx_accept_flags = accept_flags; -		ramrod->pstate = &vf->filter_state; -		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; - -		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); -		set_bit(RAMROD_RX, &ramrod->ramrod_flags); -		set_bit(RAMROD_TX, &ramrod->ramrod_flags); +	memset(ramrod, 0, sizeof(*ramrod)); +	ramrod->cid = vfq->cid; +	ramrod->cl_id = vfq_cl_id(vf, vfq); +	ramrod->rx_mode_obj = &bp->rx_mode_obj; +	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); +	ramrod->rx_accept_flags = accept_flags; +	ramrod->tx_accept_flags = accept_flags; +	ramrod->pstate = &vf->filter_state; +	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; -		ramrod->rdata = -			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); -		ramrod->rdata_mapping = -			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); +	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); +	set_bit(RAMROD_RX, &ramrod->ramrod_flags); +	set_bit(RAMROD_TX, &ramrod->ramrod_flags); -		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, -				 bnx2x_vfop_rxmode, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode, -					     cmd->block); -	} -	return -ENOMEM; +	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); +	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);  } -/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs, - * queue destructor) - */ -static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, +		    int qid, unsigned long accept_flags)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	int qid = vfop->args.qx.qid; -	enum bnx2x_vfop_qteardown_state state = vfop->state; -	struct bnx2x_vfop_cmd cmd; - -	if (vfop->rc < 0) -		goto op_err; +	struct bnx2x_rx_mode_ramrod_params ramrod; -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); -	cmd.done = bnx2x_vfop_qdown; -	cmd.block = false; - -	switch (state) { -	case BNX2X_VFOP_QTEARDOWN_RXMODE: -		/* Drop all */ -		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; -		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); -		if (vfop->rc) -			goto op_err; -		return; +	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); +	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); +	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; +	return bnx2x_config_rx_mode(bp, &ramrod); +} -	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN: -		/* vlan-clear-all: don't consume credit */ -		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC; -		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false); -		if (vfop->rc) -			goto op_err; -		return; +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) +{ +	int rc; -	case BNX2X_VFOP_QTEARDOWN_CLR_MAC: -		/* mac-clear-all: consume credit */ -		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR; -		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false); -		if (vfop->rc) -			goto op_err; -		return; +	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); -	case BNX2X_VFOP_QTEARDOWN_QDTOR: -		/* run the queue destruction flow */ -		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n"); -		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE; -		DP(BNX2X_MSG_IOV, "new state: 
BNX2X_VFOP_QTEARDOWN_DONE\n"); -		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid); -		DP(BNX2X_MSG_IOV, "returned from cmd\n"); -		if (vfop->rc) +	/* Remove all classification configuration for leading queue */ +	if (qid == LEADING_IDX) { +		rc = bnx2x_vf_rxmode(bp, vf, qid, 0); +		if (rc)  			goto op_err; -		return; -op_err: -	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n", -		  vf->abs_vfid, qid, vfop->rc); -	case BNX2X_VFOP_QTEARDOWN_DONE: -		bnx2x_vfop_end(bp, vf, vfop); -		return; -	default: -		bnx2x_vfop_default(state); -	} -} - -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, -			 struct bnx2x_virtf *vf, -			 struct bnx2x_vfop_cmd *cmd, -			 int qid) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); - -	/* for non leading queues skip directly to qdown sate */ -	if (vfop) { -		vfop->args.qx.qid = qid; -		bnx2x_vfop_opset(qid == LEADING_IDX ? -				 BNX2X_VFOP_QTEARDOWN_RXMODE : -				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, -				 cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, -					     cmd->block); +		/* Remove filtering if feasible */ +		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { +			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, +						     false, false); +			if (rc) +				goto op_err; +			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, +						     false, true); +			if (rc) +				goto op_err; +			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); +			if (rc) +				goto op_err; +		}  	} -	return -ENOMEM; +	/* Destroy queue */ +	rc = bnx2x_vf_queue_destroy(bp, vf, qid); +	if (rc) +		goto op_err; +	return rc; +op_err: +	BNX2X_ERR("vf[%d:%d] error: rc %d\n", +		  vf->abs_vfid, qid, rc); +	return rc;  }  /* VF enable primitives @@ -1416,12 +763,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)  	if (vf->cfg_flags & VF_CFG_INT_SIMD)  		val |= IGU_VF_CONF_SINGLE_ISR_EN;  	val &= ~IGU_VF_CONF_PARENT_MASK; -	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */ +	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;  	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);  	DP(BNX2X_MSG_IOV, -	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n", -	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION)); +	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", +	   vf->abs_vfid, val);  	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); @@ -1493,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)  	return 0;  } +static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, +					  struct bnx2x_virtf *vf, +					  int new) +{ +	int num = vf_vlan_rules_cnt(vf); +	int diff = new - num; +	bool rc = true; + +	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", +	   vf->abs_vfid, new, num); + +	if (diff > 0) +		rc = bp->vlans_pool.get(&bp->vlans_pool, diff); +	else if (diff < 0) +		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); + +	if (rc) +		vf_vlan_rules_cnt(vf) = new; +	else +		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", +		   vf->abs_vfid); +} +  /* must be called after the number of PF queues and the number of VFs are   * both known   */ @@ -1510,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)  	resc->num_mac_filters = 1;  	/* divvy up vlan rules */ +	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);  	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);  	vlan_count = 1 << ilog2(vlan_count); -	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); +	bnx2x_iov_re_set_vlan_filters(bp, vf, +				      vlan_count / BNX2X_NR_VIRTFN(bp)); 
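The bnx2x_iov_re_set_vlan_filters() helper introduced above replaces the old one-shot vlan credit split: instead of permanently reserving a fixed share per VF, the PF now moves credits between the shared vlans_pool and the VF whenever its rule count changes, and commits the new count only if the pool transaction succeeds. Below is a minimal, self-contained sketch of that rebalancing pattern in plain C; the credit_pool and vf_resc structures and the pool_get()/pool_put()/vf_set_vlan_rules() helpers are simplified stand-ins for illustration, not the driver's real types or functions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the PF-wide credit pool (bp->vlans_pool). */
struct credit_pool {
	int available;
};

/* Take cnt credits from the pool; fail with no side effects if short. */
static bool pool_get(struct credit_pool *p, int cnt)
{
	if (p->available < cnt)
		return false;
	p->available -= cnt;
	return true;
}

/* Return cnt credits to the pool. */
static bool pool_put(struct credit_pool *p, int cnt)
{
	p->available += cnt;
	return true;
}

/* Simplified stand-in for a VF's allocated vlan rule count. */
struct vf_resc {
	int num_vlan_filters;
};

/* Mirror of the rebalancing pattern: move the difference between the
 * current and requested rule count into or out of the shared pool, and
 * commit the new count only if the pool transaction succeeded.
 */
static void vf_set_vlan_rules(struct credit_pool *pool, struct vf_resc *vf,
			      int new_cnt)
{
	int diff = new_cnt - vf->num_vlan_filters;
	bool ok = true;

	if (diff > 0)
		ok = pool_get(pool, diff);
	else if (diff < 0)
		ok = pool_put(pool, -diff);

	if (ok)
		vf->num_vlan_filters = new_cnt;
	else
		fprintf(stderr, "not enough credits for %d rules\n", new_cnt);
}

int main(void)
{
	struct credit_pool pool = { .available = 8 };
	struct vf_resc vf = { .num_vlan_filters = 0 };

	vf_set_vlan_rules(&pool, &vf, 3);	/* grow: takes 3 credits */
	vf_set_vlan_rules(&pool, &vf, 1);	/* shrink: returns 2 credits */
	printf("vf rules %d, pool left %d\n", vf.num_vlan_filters,
	       pool.available);
	return 0;
}

As in the patch, a failed grow leaves the VF's previous rule count untouched, which is why bnx2x_vf_acquire() can simply request resc->num_vlan_filters + 1 (the extra credit hidden for the hypervisor) without tracking the old count itself.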
 	/* no real limitation */  	resc->num_mc_filters = 0; @@ -1549,120 +921,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)  	bnx2x_tx_hw_flushed(bp, poll_cnt);  } -static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; -	enum bnx2x_vfop_flr_state state = vfop->state; -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vfop_flr, -		.block = false, -	}; - -	if (vfop->rc < 0) -		goto op_err; +	int rc, i; -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); -	switch (state) { -	case BNX2X_VFOP_FLR_QUEUES: -		/* the cleanup operations are valid if and only if the VF -		 * was first acquired. -		 */ -		if (++(qx->qid) < vf_rxq_count(vf)) { -			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd, -						       qx->qid); -			if (vfop->rc) -				goto op_err; -			return; -		} -		/* remove multicasts */ -		vfop->state = BNX2X_VFOP_FLR_HW; -		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, -						0, true); -		if (vfop->rc) -			goto op_err; -		return; -	case BNX2X_VFOP_FLR_HW: - -		/* dispatch final cleanup and wait for HW queues to flush */ -		bnx2x_vf_flr_clnup_hw(bp, vf); +	/* the cleanup operations are valid if and only if the VF +	 * was first acquired. +	 */ +	for (i = 0; i < vf_rxq_count(vf); i++) { +		rc = bnx2x_vf_queue_flr(bp, vf, i); +		if (rc) +			goto out; +	} -		/* release VF resources */ -		bnx2x_vf_free_resc(bp, vf); +	/* remove multicasts */ +	bnx2x_vf_mcast(bp, vf, NULL, 0, true); -		/* re-open the mailbox */ -		bnx2x_vf_enable_mbx(bp, vf->abs_vfid); +	/* dispatch final cleanup and wait for HW queues to flush */ +	bnx2x_vf_flr_clnup_hw(bp, vf); -		goto op_done; -	default: -		bnx2x_vfop_default(state); -	} -op_err: -	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: -	vf->flr_clnup_stage = VF_FLR_ACK; -	bnx2x_vfop_end(bp, vf, vfop); -	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); -} +	/* release VF resources */ +	bnx2x_vf_free_resc(bp, vf); -static int bnx2x_vfop_flr_cmd(struct bnx2x *bp, -			      struct bnx2x_virtf *vf, -			      vfop_handler_t done) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	if (vfop) { -		vfop->args.qx.qid = -1; /* loop */ -		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES, -				 bnx2x_vfop_flr, done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false); -	} -	return -ENOMEM; +	/* re-open the mailbox */ +	bnx2x_vf_enable_mbx(bp, vf->abs_vfid); +	return; +out: +	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", +		  vf->abs_vfid, i, rc);  } -static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf) +static void bnx2x_vf_flr_clnup(struct bnx2x *bp)  { -	int i = prev_vf ? prev_vf->index + 1 : 0;  	struct bnx2x_virtf *vf; +	int i; -	/* find next VF to cleanup */ -next_vf_to_clean: -	for (; -	     i < BNX2X_NR_VIRTFN(bp) && -	     (bnx2x_vf(bp, i, state) != VF_RESET || -	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN); -	     i++) -		; +	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { +		/* VF should be RESET & in FLR cleanup states */ +		if (bnx2x_vf(bp, i, state) != VF_RESET || +		    !bnx2x_vf(bp, i, flr_clnup_stage)) +			continue; -	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i, -	   BNX2X_NR_VIRTFN(bp)); +		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. 
Num of vfs: %d\n", +		   i, BNX2X_NR_VIRTFN(bp)); -	if (i < BNX2X_NR_VIRTFN(bp)) {  		vf = BP_VF(bp, i);  		/* lock the vf pf channel */  		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);  		/* invoke the VF FLR SM */ -		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) { -			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n", -				  vf->abs_vfid); - -			/* mark the VF to be ACKED and continue */ -			vf->flr_clnup_stage = VF_FLR_ACK; -			goto next_vf_to_clean; -		} -		return; -	} - -	/* we are done, update vf records */ -	for_each_vf(bp, i) { -		vf = BP_VF(bp, i); - -		if (vf->flr_clnup_stage != VF_FLR_ACK) -			continue; +		bnx2x_vf_flr(bp, vf); -		vf->flr_clnup_stage = VF_FLR_EPILOG; +		/* mark the VF to be ACKED and continue */ +		vf->flr_clnup_stage = false; +		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);  	}  	/* Acknowledge the handled VFs. @@ -1712,7 +1027,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)  		if (reset) {  			/* set as reset and ready for cleanup */  			vf->state = VF_RESET; -			vf->flr_clnup_stage = VF_FLR_CLN; +			vf->flr_clnup_stage = true;  			DP(BNX2X_MSG_IOV,  			   "Initiating Final cleanup for VF %d\n", @@ -1721,7 +1036,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)  	}  	/* do the FLR cleanup for all marked VFs*/ -	bnx2x_vf_flr_clnup(bp, NULL); +	bnx2x_vf_flr_clnup(bp);  }  /* IOV global initialization routines  */ @@ -1756,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)  	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);  	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); -	/* set the VF doorbell threshold */ -	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); +	/* set the VF doorbell threshold. This threshold represents the amount +	 * of doorbells allowed in the main DORQ fifo for a specific VF. +	 */ +	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);  }  void bnx2x_iov_init_dmae(struct bnx2x *bp) @@ -1988,7 +1305,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,  		bnx2x_vf(bp, i, index) = i;  		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;  		bnx2x_vf(bp, i, state) = VF_FREE; -		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));  		mutex_init(&bnx2x_vf(bp, i, op_mutex));  		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;  	} @@ -2009,6 +1325,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,  		goto failed;  	} +	/* Prepare the VFs event synchronization mechanism */ +	mutex_init(&bp->vfdb->event_mutex); +  	return 0;  failed:  	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); @@ -2018,6 +1337,8 @@ failed:  void bnx2x_iov_remove_one(struct bnx2x *bp)  { +	int vf_idx; +  	/* if SRIOV is not enabled there's nothing to do */  	if (!IS_SRIOV(bp))  		return; @@ -2026,6 +1347,18 @@ void bnx2x_iov_remove_one(struct bnx2x *bp)  	pci_disable_sriov(bp->pdev);  	DP(BNX2X_MSG_IOV, "sriov disabled\n"); +	/* disable access to all VFs */ +	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { +		bnx2x_pretend_func(bp, +				   HW_VF_HANDLE(bp, +						bp->vfdb->sriov.first_vf_in_pf + +						vf_idx)); +		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", +		   bp->vfdb->sriov.first_vf_in_pf + vf_idx); +		bnx2x_vf_enable_internal(bp, 0); +		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); +	} +  	/* free vf database */  	__bnx2x_iov_free_vfdb(bp);  } @@ -2073,7 +1406,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)  		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);  		if (cxt->size) { -			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); +			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); +			if (!cxt->addr) +				
goto alloc_mem_err;  		} else {  			cxt->addr = NULL;  			cxt->mapping = 0; @@ -2083,20 +1418,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)  	/* allocate vfs ramrods dma memory - client_init and set_mac */  	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); -	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, -			tot_size); +	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, +						   tot_size); +	if (!BP_VFDB(bp)->sp_dma.addr) +		goto alloc_mem_err;  	BP_VFDB(bp)->sp_dma.size = tot_size;  	/* allocate mailboxes */  	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; -	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, -			tot_size); +	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, +						  tot_size); +	if (!BP_VF_MBX_DMA(bp)->addr) +		goto alloc_mem_err; +  	BP_VF_MBX_DMA(bp)->size = tot_size;  	/* allocate local bulletin boards */  	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; -	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, -			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); +	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, +						       tot_size); +	if (!BP_VF_BULLETIN_DMA(bp)->addr) +		goto alloc_mem_err; +  	BP_VF_BULLETIN_DMA(bp)->size = tot_size;  	return 0; @@ -2122,6 +1465,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,  			     bnx2x_vf_sp_map(bp, vf, q_data),  			     q_type); +	/* sp indication is set only when vlan/mac/etc. are initialized */ +	q->sp_initialized = false; +  	DP(BNX2X_MSG_IOV,  	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",  	   vf->abs_vfid, q->sp_obj.func_id, q->cid); @@ -2162,10 +1508,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)  		bnx2x_iov_static_resc(bp, vf);  		/* queues are initialized during VF-ACQUIRE */ - -		/* reserve the vf vlan credit */ -		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); -  		vf->filter_state = 0;  		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); @@ -2174,6 +1516,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)  		 *  It needs to be initialized here so that it can be safely  		 *  handled by a subsequent FLR flow.  		 
*/ +		vf->mcast_list_len = 0;  		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,  				     0xFF, 0xFF, 0xFF,  				     bnx2x_vf_sp(bp, vf, mcast_rdata), @@ -2224,7 +1567,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)  	/* release all the VFs */  	for_each_vf(bp, i) -		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ +		bnx2x_vf_release(bp, BP_VF(bp, i));  	return 0;  } @@ -2309,9 +1652,15 @@ static  void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,  				 struct bnx2x_virtf *vf)  { -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic(); +} + +static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, +					   struct bnx2x_virtf *vf) +{ +	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);  }  int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) @@ -2338,6 +1687,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)  	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:  	case EVENT_RING_OPCODE_MULTICAST_RULES:  	case EVENT_RING_OPCODE_FILTERS_RULES: +	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:  		cid = (elem->message.data.eth_event.echo &  		       BNX2X_SWCID_MASK);  		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); @@ -2349,8 +1699,9 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)  		goto get_vf;  	case EVENT_RING_OPCODE_MALICIOUS_VF:  		abs_vfid = elem->message.data.malicious_vf_event.vf_id; -		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", -		   abs_vfid, elem->message.data.malicious_vf_event.err_id); +		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", +			  abs_vfid, +			  elem->message.data.malicious_vf_event.err_id);  		goto get_vf;  	default:  		return 1; @@ -2401,19 +1752,15 @@ get_vf:  		   vf->abs_vfid, qidx);  		bnx2x_vf_handle_filters_eqe(bp, vf);  		break; +	case EVENT_RING_OPCODE_RSS_UPDATE_RULES: +		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", +		   vf->abs_vfid, qidx); +		bnx2x_vf_handle_rss_update_eqe(bp, vf);  	case EVENT_RING_OPCODE_VF_FLR: -		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", -		   vf->abs_vfid); -		/* Do nothing for now */ -		break;  	case EVENT_RING_OPCODE_MALICIOUS_VF: -		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n", -		   abs_vfid, elem->message.data.malicious_vf_event.err_id);  		/* Do nothing for now */ -		break; +		return 0;  	} -	/* SRIOV: reschedule any 'in_progress' operations */ -	bnx2x_iov_sp_event(bp, cid, false);  	return 0;  } @@ -2450,23 +1797,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,  	}  } -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) -{ -	struct bnx2x_virtf *vf; - -	/* check if the cid is the VF range */ -	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) -		return; - -	vf = bnx2x_vf_by_cid(bp, vf_cid); -	if (vf) { -		/* set in_progress flag */ -		atomic_set(&vf->op_in_progress, 1); -		if (queue_work) -			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); -	} -} -  void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)  {  	int i; @@ -2487,10 +1817,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)  	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -  		(is_fcoe ? 0 : 1); -	DP(BNX2X_MSG_IOV, -	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. 
Will add queries on top of that\n", -	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, -	   first_queue_query_index + num_queues_req); +	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), +	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", +	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, +	       first_queue_query_index + num_queues_req);  	cur_data_offset = bp->fw_stats_data_mapping +  		offsetof(struct bnx2x_fw_stats_data, queue_stats) + @@ -2504,9 +1834,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)  		struct bnx2x_virtf *vf = BP_VF(bp, i);  		if (vf->state != VF_ENABLED) { -			DP(BNX2X_MSG_IOV, -			   "vf %d not enabled so no stats for it\n", -			   vf->abs_vfid); +			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), +			       "vf %d not enabled so no stats for it\n", +			       vf->abs_vfid);  			continue;  		} @@ -2548,32 +1878,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)  	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;  } -void bnx2x_iov_sp_task(struct bnx2x *bp) -{ -	int i; - -	if (!IS_SRIOV(bp)) -		return; -	/* Iterate over all VFs and invoke state transition for VFs with -	 * 'in-progress' slow-path operations -	 */ -	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); -	for_each_vf(bp, i) { -		struct bnx2x_virtf *vf = BP_VF(bp, i); - -		if (!vf) { -			BNX2X_ERR("VF was null! skipping...\n"); -			continue; -		} - -		if (!list_empty(&vf->op_list_head) && -		    atomic_read(&vf->op_in_progress)) { -			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); -			bnx2x_vfop_cur(bp, vf)->transition(bp, vf); -		} -	} -} -  static inline  struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)  { @@ -2634,11 +1938,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,  	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);  	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); +	/* Save a vlan filter for the Hypervisor */  	return ((req_resc->num_rxqs <= rxq_cnt) &&  		(req_resc->num_txqs <= txq_cnt) &&  		(req_resc->num_sbs <= vf_sb_count(vf))   &&  		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && -		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); +		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));  }  /* CORE VF API */ @@ -2694,14 +1999,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,  	vf_txq_count(vf) = resc->num_txqs ? 
: bnx2x_vf_max_queue_cnt(bp, vf);  	if (resc->num_mac_filters)  		vf_mac_rules_cnt(vf) = resc->num_mac_filters; -	if (resc->num_vlan_filters) -		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; +	/* Add an additional vlan filter credit for the hypervisor */ +	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);  	DP(BNX2X_MSG_IOV,  	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",  	   vf_sb_count(vf), vf_rxq_count(vf),  	   vf_txq_count(vf), vf_mac_rules_cnt(vf), -	   vf_vlan_rules_cnt(vf)); +	   vf_vlan_rules_visible_cnt(vf));  	/* Initialize the queues */  	if (!vf->vfqs) { @@ -2802,63 +2107,33 @@ struct set_vf_state_cookie {  	u8 state;  }; -void bnx2x_set_vf_state(void *cookie) +static void bnx2x_set_vf_state(void *cookie)  {  	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;  	p->vf->state = p->state;  } -/* VFOP close (teardown the queues, delete mcasts and close HW) */ -static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; -	enum bnx2x_vfop_close_state state = vfop->state; -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vfop_close, -		.block = false, -	}; - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	switch (state) { -	case BNX2X_VFOP_CLOSE_QUEUES: +	int rc = 0, i; -		if (++(qx->qid) < vf_rxq_count(vf)) { -			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); -			if (vfop->rc) -				goto op_err; -			return; -		} +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); -		/* remove multicasts */ -		vfop->state = BNX2X_VFOP_CLOSE_HW; -		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); -		if (vfop->rc) +	/* Close all queues */ +	for (i = 0; i < vf_rxq_count(vf); i++) { +		rc = bnx2x_vf_queue_teardown(bp, vf, i); +		if (rc)  			goto op_err; -		return; - -	case BNX2X_VFOP_CLOSE_HW: - -		/* disable the interrupts */ -		DP(BNX2X_MSG_IOV, "disabling igu\n"); -		bnx2x_vf_igu_disable(bp, vf); +	} -		/* disable the VF */ -		DP(BNX2X_MSG_IOV, "clearing qtbl\n"); -		bnx2x_vf_clr_qtbl(bp, vf); +	/* disable the interrupts */ +	DP(BNX2X_MSG_IOV, "disabling igu\n"); +	bnx2x_vf_igu_disable(bp, vf); -		goto op_done; -	default: -		bnx2x_vfop_default(state); -	} -op_err: -	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: +	/* disable the VF */ +	DP(BNX2X_MSG_IOV, "clearing qtbl\n"); +	bnx2x_vf_clr_qtbl(bp, vf);  	/* need to make sure there are no outstanding stats ramrods which may  	 * cause the device to access the VF's stats buffer which it will free @@ -2873,40 +2148,20 @@ op_done:  	}  	DP(BNX2X_MSG_IOV, "set state to acquired\n"); -	bnx2x_vfop_end(bp, vf, vfop); -} -int bnx2x_vfop_close_cmd(struct bnx2x *bp, -			 struct bnx2x_virtf *vf, -			 struct bnx2x_vfop_cmd *cmd) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	if (vfop) { -		vfop->args.qx.qid = -1; /* loop */ -		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, -				 bnx2x_vfop_close, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, -					     cmd->block); -	} -	return -ENOMEM; +	return 0; +op_err: +	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); +	return rc;  }  /* VF release can be called either: 1. The VF was acquired but   * not enabled 2. 
the vf was enabled or in the process of being   * enabled   */ -static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vfop_release, -		.block = false, -	}; - -	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); - -	if (vfop->rc < 0) -		goto op_err; +	int rc;  	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,  	   vf->state == VF_FREE ? "Free" : @@ -2917,116 +2172,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)  	switch (vf->state) {  	case VF_ENABLED: -		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); -		if (vfop->rc) +		rc = bnx2x_vf_close(bp, vf); +		if (rc)  			goto op_err; -		return; - +		/* Fallthrough to release resources */  	case VF_ACQUIRED:  		DP(BNX2X_MSG_IOV, "about to free resources\n");  		bnx2x_vf_free_resc(bp, vf); -		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); -		goto op_done; +		break;  	case VF_FREE:  	case VF_RESET: -		/* do nothing */ -		goto op_done;  	default: -		bnx2x_vfop_default(vf->state); +		break;  	} +	return 0;  op_err: -	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); -op_done: -	bnx2x_vfop_end(bp, vf, vfop); +	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); +	return rc;  } -static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, +			struct bnx2x_config_rss_params *rss)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	enum bnx2x_vfop_rss_state state; - -	if (!vfop) { -		BNX2X_ERR("vfop was null\n"); -		return; -	} - -	state = vfop->state; -	bnx2x_vfop_reset_wq(vf); - -	if (vfop->rc < 0) -		goto op_err; - -	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); - -	switch (state) { -	case BNX2X_VFOP_RSS_CONFIG: -		/* next state */ -		vfop->state = BNX2X_VFOP_RSS_DONE; -		bnx2x_config_rss(bp, &vfop->op_p->rss); -		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); -op_err: -		BNX2X_ERR("RSS error: rc %d\n", vfop->rc); -op_done: -	case BNX2X_VFOP_RSS_DONE: -		bnx2x_vfop_end(bp, vf, vfop); -		return; -	default: -		bnx2x_vfop_default(state); -	} -op_pending: -	return; +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); +	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); +	return bnx2x_config_rss(bp, rss);  } -int bnx2x_vfop_release_cmd(struct bnx2x *bp, -			   struct bnx2x_virtf *vf, -			   struct bnx2x_vfop_cmd *cmd) +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, +			struct vfpf_tpa_tlv *tlv, +			struct bnx2x_queue_update_tpa_params *params)  { -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	if (vfop) { -		bnx2x_vfop_opset(-1, /* use vf->state */ -				 bnx2x_vfop_release, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, -					     cmd->block); -	} -	return -ENOMEM; -} +	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; +	struct bnx2x_queue_state_params qstate; +	int qid, rc = 0; -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, -		       struct bnx2x_virtf *vf, -		       struct bnx2x_vfop_cmd *cmd) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); +	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); + +	/* Set ramrod params */ +	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); +	memcpy(&qstate.params.update_tpa, params, +	       sizeof(struct bnx2x_queue_update_tpa_params)); +	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; +	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); -	if (vfop) { -		
bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, -				 cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, -					     cmd->block); +	for (qid = 0; qid < vf_rxq_count(vf); qid++) { +		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); +		qstate.params.update_tpa.sge_map = sge_addr[qid]; +		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", +		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]), +		   U64_LO(sge_addr[qid])); +		rc = bnx2x_queue_state_change(bp, &qstate); +		if (rc) { +			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", +				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), +				  vf->abs_vfid, qid); +			return rc; +		}  	} -	return -ENOMEM; + +	return rc;  }  /* VF release ~ VF close + VF release-resources   * Release is the ultimate SW shutdown and is called whenever an   * irrecoverable error is encountered.   */ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)  { -	struct bnx2x_vfop_cmd cmd = { -		.done = NULL, -		.block = block, -	};  	int rc;  	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);  	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); -	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); +	rc = bnx2x_vf_free(bp, vf);  	if (rc)  		WARN(rc,  		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",  		     vf->abs_vfid, rc); +	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); +	return rc;  }  static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, @@ -3035,16 +2261,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,  	*sbdf = vf->devfn | (vf->bus << 8);  } -static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, -		       struct bnx2x_vf_bar_info *bar_info) -{ -	int n; - -	bar_info->nr_bars = bp->vfdb->sriov.nres; -	for (n = 0; n < bar_info->nr_bars; n++) -		bar_info->bars[n] = vf->bars[n]; -} -  void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,  			      enum channel_tlvs tlv)  { @@ -3096,10 +2312,69 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,  	   vf->abs_vfid, vf->op_current);  } +static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) +{ +	struct bnx2x_queue_state_params q_params; +	u32 prev_flags; +	int i, rc; + +	/* Verify changes are needed and record current Tx switching state */ +	prev_flags = bp->flags; +	if (enable) +		bp->flags |= TX_SWITCHING; +	else +		bp->flags &= ~TX_SWITCHING; +	if (prev_flags == bp->flags) +		return 0; + +	/* Verify state enables the sending of queue ramrods */ +	if ((bp->state != BNX2X_STATE_OPEN) || +	    (bnx2x_get_q_logical_state(bp, +				      &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != +	     BNX2X_Q_LOGICAL_STATE_ACTIVE)) +		return 0; + +	/* send q. 
update ramrod to configure Tx switching */ +	memset(&q_params, 0, sizeof(q_params)); +	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); +	q_params.cmd = BNX2X_Q_CMD_UPDATE; +	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, +		  &q_params.params.update.update_flags); +	if (enable) +		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, +			  &q_params.params.update.update_flags); +	else +		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, +			    &q_params.params.update.update_flags); + +	/* send the ramrod on all the queues of the PF */ +	for_each_eth_queue(bp, i) { +		struct bnx2x_fastpath *fp = &bp->fp[i]; + +		/* Set the appropriate Queue object */ +		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + +		/* Update the Queue state */ +		rc = bnx2x_queue_state_change(bp, &q_params); +		if (rc) { +			BNX2X_ERR("Failed to configure Tx switching\n"); +			return rc; +		} +	} + +	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); +	return 0; +} +  int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)  {  	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); +	if (!IS_SRIOV(bp)) { +		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); +		return -EINVAL; +	} +  	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",  	   num_vfs_param, BNX2X_NR_VIRTFN(bp)); @@ -3118,12 +2393,14 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)  	bp->requested_nr_virtfn = num_vfs_param;  	if (num_vfs_param == 0) { +		bnx2x_set_pf_tx_switching(bp, false);  		pci_disable_sriov(dev);  		return 0;  	} else {  		return bnx2x_enable_sriov(bp);  	}  } +  #define IGU_ENTRY_SIZE 4  int bnx2x_enable_sriov(struct bnx2x *bp) @@ -3183,13 +2460,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)  		bnx2x_iov_static_resc(bp, vf);  	} -	/* prepare msix vectors in VF configuration space */ +	/* prepare msix vectors in VF configuration space - the value in the +	 * PCI configuration space should be the index of the last entry, +	 * namely one less than the actual size of the table +	 */  	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {  		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));  		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, -		       num_vf_queues); +		       num_vf_queues - 1);  		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", -		   vf_idx, num_vf_queues); +		   vf_idx, num_vf_queues - 1);  	}  	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); @@ -3197,7 +2477,12 @@ int bnx2x_enable_sriov(struct bnx2x *bp)  	 * the "acquire" messages to appear on the VF PF channel.  	 */  	DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); -	pci_disable_sriov(bp->pdev); +	bnx2x_disable_sriov(bp); + +	rc = bnx2x_set_pf_tx_switching(bp, true); +	if (rc) +		return rc; +  	rc = pci_enable_sriov(bp->pdev, req_vfs);  	if (rc) {  		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); @@ -3225,8 +2510,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)  	pci_disable_sriov(bp->pdev);  } -int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, -			struct pf_vf_bulletin_content **bulletin) +static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, +			     struct bnx2x_virtf **vf, +			     struct pf_vf_bulletin_content **bulletin)  {  	if (bp->state != BNX2X_STATE_OPEN) {  		BNX2X_ERR("vf ndo called though PF is down\n"); @@ -3292,17 +2578,18 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,  	ivi->vf = vfidx;  	ivi->qos = 0; -	ivi->tx_rate = 10000; /* always 10G. 
TBA take from link struct */ +	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ +	ivi->min_tx_rate = 0;  	ivi->spoofchk = 1; /*always enabled */  	if (vf->state == VF_ENABLED) {  		/* mac and vlan are in vlan_mac objects */ -		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) +		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {  			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,  						0, ETH_ALEN); -		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))  			vlan_obj->get_n_elements(bp, vlan_obj, 1,  						 (u8 *)&ivi->vlan, 0,  						 VLAN_HLEN); +		}  	} else {  		/* mac */  		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) @@ -3376,28 +2663,30 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)  	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {  		/* configure the mac in device on this vf's queue */  		unsigned long ramrod_flags = 0; -		struct bnx2x_vlan_mac_obj *mac_obj = -			&bnx2x_leading_vfq(vf, mac_obj); +		struct bnx2x_vlan_mac_obj *mac_obj; -		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); -		if (rc) -			return rc; +		/* User should be able to see failure reason in system logs */ +		if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) +			return -EINVAL;  		/* must lock vfpf channel to protect against vf flows */  		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);  		/* remove existing eth macs */ +		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);  		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);  		if (rc) {  			BNX2X_ERR("failed to delete eth macs\n"); -			return -EINVAL; +			rc = -EINVAL; +			goto out;  		}  		/* remove existing uc list macs */  		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);  		if (rc) {  			BNX2X_ERR("failed to delete uc_list macs\n"); -			return -EINVAL; +			rc = -EINVAL; +			goto out;  		}  		/* configure the new mac to device */ @@ -3405,18 +2694,27 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)  		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,  				  BNX2X_ETH_MAC, &ramrod_flags); +out:  		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);  	} -	return 0; +	return rc;  }  int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)  { +	struct bnx2x_queue_state_params q_params = {NULL}; +	struct bnx2x_vlan_mac_ramrod_params ramrod_param; +	struct bnx2x_queue_update_params *update_params; +	struct pf_vf_bulletin_content *bulletin = NULL; +	struct bnx2x_rx_mode_ramrod_params rx_ramrod;  	struct bnx2x *bp = netdev_priv(dev); -	int rc, q_logical_state; +	struct bnx2x_vlan_mac_obj *vlan_obj; +	unsigned long vlan_mac_flags = 0; +	unsigned long ramrod_flags = 0;  	struct bnx2x_virtf *vf = NULL; -	struct pf_vf_bulletin_content *bulletin = NULL; +	unsigned long accept_flags; +	int rc;  	/* sanity and init */  	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); @@ -3434,103 +2732,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)  	/* update PF's copy of the VF's bulletin. No point in posting the vlan  	 * to the VF since it doesn't have anything to do with it. But it useful  	 * to store it here in case the VF is not up yet and we can only -	 * configure the vlan later when it does. +	 * configure the vlan later when it does. Treat vlan id 0 as remove the +	 * Host tag.  	 
*/ -	bulletin->valid_bitmap |= 1 << VLAN_VALID; +	if (vlan > 0) +		bulletin->valid_bitmap |= 1 << VLAN_VALID; +	else +		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);  	bulletin->vlan = vlan;  	/* is vf initialized and queue set up? */ -	q_logical_state = -		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); -	if (vf->state == VF_ENABLED && -	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { -		/* configure the vlan in device on this vf's queue */ -		unsigned long ramrod_flags = 0; -		unsigned long vlan_mac_flags = 0; -		struct bnx2x_vlan_mac_obj *vlan_obj = -			&bnx2x_leading_vfq(vf, vlan_obj); -		struct bnx2x_vlan_mac_ramrod_params ramrod_param; -		struct bnx2x_queue_state_params q_params = {NULL}; -		struct bnx2x_queue_update_params *update_params; - -		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); -		if (rc) -			return rc; -		memset(&ramrod_param, 0, sizeof(ramrod_param)); +	if (vf->state != VF_ENABLED || +	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != +	    BNX2X_Q_LOGICAL_STATE_ACTIVE) +		return rc; -		/* must lock vfpf channel to protect against vf flows */ -		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); +	/* User should be able to see error in system logs */ +	if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) +		return -EINVAL; -		/* remove existing vlans */ -		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags); -		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, -					  &ramrod_flags); -		if (rc) { -			BNX2X_ERR("failed to delete vlans\n"); -			return -EINVAL; -		} +	/* must lock vfpf channel to protect against vf flows */ +	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); -		/* send queue update ramrod to configure default vlan and silent -		 * vlan removal +	/* remove existing vlans */ +	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags); +	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); +	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, +				  &ramrod_flags); +	if (rc) { +		BNX2X_ERR("failed to delete vlans\n"); +		rc = -EINVAL; +		goto out; +	} + +	/* need to remove/add the VF's accept_any_vlan bit */ +	accept_flags = bnx2x_leading_vfq(vf, accept_flags); +	if (vlan) +		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); +	else +		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); + +	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, +			      accept_flags); +	bnx2x_leading_vfq(vf, accept_flags) = accept_flags; +	bnx2x_config_rx_mode(bp, &rx_ramrod); + +	/* configure the new vlan to device */ +	memset(&ramrod_param, 0, sizeof(ramrod_param)); +	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags); +	ramrod_param.vlan_mac_obj = vlan_obj; +	ramrod_param.ramrod_flags = ramrod_flags; +	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, +		&ramrod_param.user_req.vlan_mac_flags); +	ramrod_param.user_req.u.vlan.vlan = vlan; +	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; +	rc = bnx2x_config_vlan_mac(bp, &ramrod_param); +	if (rc) { +		BNX2X_ERR("failed to configure vlan\n"); +		rc =  -EINVAL; +		goto out; +	} + +	/* send queue update ramrod to configure default vlan and silent +	 * vlan removal +	 */ +	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); +	q_params.cmd = BNX2X_Q_CMD_UPDATE; +	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); +	update_params = &q_params.params.update; +	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, +		  &update_params->update_flags); +	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, +		  &update_params->update_flags); +	if (vlan == 0) { +		/* if vlan is 0 then we want to leave the VF traffic +		 * untagged, and 
leave the incoming traffic untouched +		 * (i.e. do not remove any vlan tags). +		 */ +		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, +			    &update_params->update_flags); +		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, +			    &update_params->update_flags); +	} else { +		/* configure default vlan to vf queue and set silent +		 * vlan removal (the vf remains unaware of this vlan).  		 */ -		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); -		q_params.cmd = BNX2X_Q_CMD_UPDATE; -		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); -		update_params = &q_params.params.update; -		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, +		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,  			  &update_params->update_flags); -		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, +		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,  			  &update_params->update_flags); +		update_params->def_vlan = vlan; +		update_params->silent_removal_value = +			vlan & VLAN_VID_MASK; +		update_params->silent_removal_mask = VLAN_VID_MASK; +	} -		if (vlan == 0) { -			/* if vlan is 0 then we want to leave the VF traffic -			 * untagged, and leave the incoming traffic untouched -			 * (i.e. do not remove any vlan tags). -			 */ -			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, -				    &update_params->update_flags); -			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, -				    &update_params->update_flags); -		} else { -			/* configure the new vlan to device */ -			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags); -			ramrod_param.vlan_mac_obj = vlan_obj; -			ramrod_param.ramrod_flags = ramrod_flags; -			ramrod_param.user_req.u.vlan.vlan = vlan; -			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; -			rc = bnx2x_config_vlan_mac(bp, &ramrod_param); -			if (rc) { -				BNX2X_ERR("failed to configure vlan\n"); -				return -EINVAL; -			} - -			/* configure default vlan to vf queue and set silent -			 * vlan removal (the vf remains unaware of this vlan). -			 */ -			update_params = &q_params.params.update; -			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, -				  &update_params->update_flags); -			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, -				  &update_params->update_flags); -			update_params->def_vlan = vlan; -		} +	/* Update the Queue state */ +	rc = bnx2x_queue_state_change(bp, &q_params); +	if (rc) { +		BNX2X_ERR("Failed to configure default VLAN\n"); +		goto out; +	} -		/* Update the Queue state */ -		rc = bnx2x_queue_state_change(bp, &q_params); -		if (rc) { -			BNX2X_ERR("Failed to configure default VLAN\n"); -			return rc; -		} -		/* clear the flag indicating that this VF needs its vlan -		 * (will only be set if the HV configured th Vlan before vf was -		 * and we were called because the VF came up later -		 */ -		vf->cfg_flags &= ~VF_CFG_VLAN; +	/* clear the flag indicating that this VF needs its vlan +	 * (will only be set if the HV configured the Vlan before vf was +	 * up and we were called because the VF came up later +	 */ +out: +	vf->cfg_flags &= ~VF_CFG_VLAN; +	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); -		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); -	} -	return 0; +	return rc;  }  /* crc is the first field in the bulletin board. 
Compute the crc over the @@ -3580,7 +2893,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)  	/* the mac address in bulletin board is valid and is new */  	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && -	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) { +	    !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {  		/* update new mac to net device */  		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);  	} @@ -3600,13 +2913,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)  	bnx2x_sample_bulletin(bp);  	/* if channel is down we need to self destruct */ -	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { -		smp_mb__before_clear_bit(); -		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, -			&bp->sp_rtnl_state); -		smp_mb__after_clear_bit(); -		schedule_delayed_work(&bp->sp_rtnl_task, 0); -	} +	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) +		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, +				       BNX2X_MSG_IOV);  }  void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) @@ -3615,51 +2924,37 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)  	return bp->regview + PXP_VF_ADDR_DB_START;  } +void bnx2x_vf_pci_dealloc(struct bnx2x *bp) +{ +	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, +		       sizeof(struct bnx2x_vf_mbx_msg)); +	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, +		       sizeof(union pf_vf_bulletin)); +} +  int bnx2x_vf_pci_alloc(struct bnx2x *bp)  {  	mutex_init(&bp->vf2pf_mutex);  	/* allocate vf2pf mailbox for vf to pf channel */ -	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, -			sizeof(struct bnx2x_vf_mbx_msg)); +	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, +					 sizeof(struct bnx2x_vf_mbx_msg)); +	if (!bp->vf2pf_mbox) +		goto alloc_mem_err;  	/* allocate pf 2 vf bulletin board */ -	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, -			sizeof(union pf_vf_bulletin)); +	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, +					     sizeof(union pf_vf_bulletin)); +	if (!bp->pf2vf_bulletin) +		goto alloc_mem_err;  	return 0;  alloc_mem_err: -	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, -		       sizeof(struct bnx2x_vf_mbx_msg)); -	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, -		       sizeof(union pf_vf_bulletin)); +	bnx2x_vf_pci_dealloc(bp);  	return -ENOMEM;  } -int bnx2x_open_epilog(struct bnx2x *bp) -{ -	/* Enable sriov via delayed work. This must be done via delayed work -	 * because it causes the probe of the vf devices to be run, which invoke -	 * register_netdevice which must have rtnl lock taken. As we are holding -	 * the lock right now, that could only work if the probe would not take -	 * the lock. However, as the probe of the vf may be called from other -	 * contexts as well (such as passthrough to vm fails) it can't assume -	 * the lock is being held for it. Using delayed work here allows the -	 * probe code to simply take the lock (i.e. wait for it to be released -	 * if it is being held). We only want to do this if the number of VFs -	 * was set before PF driver was loaded. 
-	 */ -	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) { -		smp_mb__before_clear_bit(); -		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); -		smp_mb__after_clear_bit(); -		schedule_delayed_work(&bp->sp_rtnl_task, 0); -	} - -	return 0; -} -  void bnx2x_iov_channel_down(struct bnx2x *bp)  {  	int vf_idx; @@ -3679,3 +2974,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)  		bnx2x_post_vf_bulletin(bp, vf_idx);  	}  } + +void bnx2x_iov_task(struct work_struct *work) +{ +	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); + +	if (!netif_running(bp->dev)) +		return; + +	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, +			       &bp->iov_task_state)) +		bnx2x_vf_handle_flr_event(bp); + +	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, +			       &bp->iov_task_state)) +		bnx2x_vf_mbx(bp); +} + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) +{ +	smp_mb__before_atomic(); +	set_bit(flag, &bp->iov_task_state); +	smp_mb__after_atomic(); +	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); +	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 059f0d460af..96c575e147a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -12,9 +12,9 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Shmulik Ravid <shmulikr@broadcom.com> - *	       Ariel Elior <ariele@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Shmulik Ravid + *	       Ariel Elior <ariel.elior@qlogic.com>   */  #ifndef BNX2X_SRIOV_H  #define BNX2X_SRIOV_H @@ -30,6 +30,8 @@ enum sample_bulletin_result {  #ifdef CONFIG_BNX2X_SRIOV +extern struct workqueue_struct *bnx2x_iov_wq; +  /* The bnx2x device structure holds vfdb structure described below.   * The VF array is indexed by the relative vfid.   
*/ @@ -74,6 +76,7 @@ struct bnx2x_vf_queue {  	/* VLANs object */  	struct bnx2x_vlan_mac_obj	vlan_obj;  	atomic_t vlan_count;		/* 0 means vlan-0 is set  ~ untagged */ +	unsigned long accept_flags;	/* last accept flags configured */  	/* Queue Slow-path State object */  	struct bnx2x_queue_sp_obj	sp_obj; @@ -82,108 +85,35 @@ struct bnx2x_vf_queue {  	u16 index;  	u16 sb_idx;  	bool is_leading; +	bool sp_initialized;  }; -/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: - * q-init, q-setup and SB index +/* struct bnx2x_vf_queue_construct_params - prepare queue construction + * parameters: q-init, q-setup and SB index   */ -struct bnx2x_vfop_qctor_params { +struct bnx2x_vf_queue_construct_params {  	struct bnx2x_queue_state_params		qstate;  	struct bnx2x_queue_setup_params		prep_qsetup;  }; -/* VFOP parameters (one copy per VF) */ -union bnx2x_vfop_params { -	struct bnx2x_vlan_mac_ramrod_params	vlan_mac; -	struct bnx2x_rx_mode_ramrod_params	rx_mode; -	struct bnx2x_mcast_ramrod_params	mcast; -	struct bnx2x_config_rss_params		rss; -	struct bnx2x_vfop_qctor_params		qctor; -}; -  /* forward */  struct bnx2x_virtf;  /* VFOP definitions */ -typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); - -struct bnx2x_vfop_cmd { -	vfop_handler_t done; -	bool block; -}; -/* VFOP queue filters command additional arguments */ -struct bnx2x_vfop_filter { -	struct list_head link; +struct bnx2x_vf_mac_vlan_filter {  	int type; -#define BNX2X_VFOP_FILTER_MAC	1 -#define BNX2X_VFOP_FILTER_VLAN	2 +#define BNX2X_VF_FILTER_MAC	1 +#define BNX2X_VF_FILTER_VLAN	2  	bool add;  	u8 *mac;  	u16 vid;  }; -struct bnx2x_vfop_filters { -	int add_cnt; -	struct list_head head; -	struct bnx2x_vfop_filter filters[]; -}; - -/* transient list allocated, built and saved until its - * passed to the SP-VERBs layer. 
- */ -struct bnx2x_vfop_args_mcast { -	int mc_num; -	struct bnx2x_mcast_list_elem *mc; -}; - -struct bnx2x_vfop_args_qctor { -	int	qid; -	u16	sb_idx; -}; - -struct bnx2x_vfop_args_qdtor { -	int	qid; -	struct eth_context *cxt; -}; - -struct bnx2x_vfop_args_defvlan { -	int	qid; -	bool	enable; -	u16	vid; -	u8	prio; -}; - -struct bnx2x_vfop_args_qx { -	int	qid; -	bool	en_add; -}; - -struct bnx2x_vfop_args_filters { -	struct bnx2x_vfop_filters *multi_filter; -	atomic_t *credit;	/* non NULL means 'don't consume credit' */ -}; - -union bnx2x_vfop_args { -	struct bnx2x_vfop_args_mcast	mc_list; -	struct bnx2x_vfop_args_qctor	qctor; -	struct bnx2x_vfop_args_qdtor	qdtor; -	struct bnx2x_vfop_args_defvlan	defvlan; -	struct bnx2x_vfop_args_qx	qx; -	struct bnx2x_vfop_args_filters	filters; -}; - -struct bnx2x_vfop { -	struct list_head link; -	int			rc;		/* return code */ -	int			state;		/* next state */ -	union bnx2x_vfop_args	args;		/* extra arguments */ -	union bnx2x_vfop_params *op_p;		/* ramrod params */ - -	/* state machine callbacks */ -	vfop_handler_t transition; -	vfop_handler_t done; +struct bnx2x_vf_mac_vlan_filters { +	int count; +	struct bnx2x_vf_mac_vlan_filter filters[];  };  /* vf context */ @@ -203,15 +133,7 @@ struct bnx2x_virtf {  #define VF_ENABLED	2	/* VF Enabled */  #define VF_RESET	3	/* VF FLR'd, pending cleanup */ -	/* non 0 during flr cleanup */ -	u8 flr_clnup_stage; -#define VF_FLR_CLN	1	/* reclaim resources and do 'final cleanup' -				 * sans the end-wait -				 */ -#define VF_FLR_ACK	2	/* ACK flr notification */ -#define VF_FLR_EPILOG	3	/* wait for VF remnants to dissipate in the HW -				 * ~ final cleanup' end wait -				 */ +	bool flr_clnup_stage;	/* true during flr cleanup */  	/* dma */  	dma_addr_t fw_stat_map;		/* valid iff VF_CFG_STATS */ @@ -237,6 +159,8 @@ struct bnx2x_virtf {  #define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)  #define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)  #define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters) +	/* Hide a single vlan filter credit for the hypervisor */ +#define vf_vlan_rules_visible_cnt(vf)	(vf_vlan_rules_cnt(vf) - 1)  	u8 sb_count;	/* actual number of SBs */  	u8 igu_base_id;	/* base igu status block id */ @@ -268,17 +192,13 @@ struct bnx2x_virtf {  	int leading_rss;  	/* MCAST object */ +	int mcast_list_len;  	struct bnx2x_mcast_obj		mcast_obj;  	/* RSS configuration object */  	struct bnx2x_rss_config_obj     rss_conf_obj;  	/* slow-path operations */ -	atomic_t			op_in_progress; -	int				op_rc; -	bool				op_wait_blocking; -	struct list_head		op_list_head; -	union bnx2x_vfop_params		op_params;  	struct mutex			op_mutex; /* one vfop at a time mutex */  	enum channel_tlvs		op_current;  }; @@ -336,11 +256,6 @@ struct bnx2x_vf_mbx {  	u32 vf_addr_hi;  	struct vfpf_first_tlv first_tlv;	/* saved VF request header */ - -	u8 flags; -#define VF_MSG_INPROCESS	0x1	/* failsafe - the FW should prevent -					 * more then one pending msg -					 */  };  struct bnx2x_vf_sp { @@ -417,6 +332,10 @@ struct bnx2x_vfdb {  	/* the number of msix vectors belonging to this PF designated for VFs */  	u16 vf_sbs_pool;  	u16 first_vf_igu_entry; + +	/* sp_rtnl synchronization */ +	struct mutex			event_mutex; +	u64				event_occur;  };  /* queue access */ @@ -466,13 +385,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);  void bnx2x_iov_init_dmae(struct bnx2x *bp);  void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,  				struct bnx2x_queue_sp_obj **q_obj); -void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool 
queue_work);  int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);  void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);  void bnx2x_iov_storm_stats_update(struct bnx2x *bp); -void bnx2x_iov_sp_task(struct bnx2x *bp);  /* global vf mailbox routines */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); +void bnx2x_vf_mbx(struct bnx2x *bp); +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, +			   struct vf_pf_event_data *vfpf_event);  void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);  /* CORE VF API */ @@ -485,162 +404,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,  int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,  		  dma_addr_t *sb_map); -/* VFOP generic helpers */ -#define bnx2x_vfop_default(state) do {				\ -		BNX2X_ERR("Bad state %d\n", (state));		\ -		vfop->rc = -EINVAL;				\ -		goto op_err;					\ -	} while (0) - -enum { -	VFOP_DONE, -	VFOP_CONT, -	VFOP_VERIFY_PEND, -}; - -#define bnx2x_vfop_finalize(vf, rc, next) do {				\ -		if ((rc) < 0)						\ -			goto op_err;					\ -		else if ((rc) > 0)					\ -			goto op_pending;				\ -		else if ((next) == VFOP_DONE)				\ -			goto op_done;					\ -		else if ((next) == VFOP_VERIFY_PEND)			\ -			BNX2X_ERR("expected pending\n");		\ -		else {							\ -			DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n");	\ -			atomic_set(&vf->op_in_progress, 1);		\ -			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);  \ -			return;						\ -		}							\ -	} while (0) - -#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr)		\ -	do {								\ -		vfop->state = first_state;				\ -		vfop->op_p = &vf->op_params;				\ -		vfop->transition = trans_hndlr;				\ -		vfop->done = done_hndlr;				\ -	} while (0) - -static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, -						struct bnx2x_virtf *vf) -{ -	WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); -	WARN_ON(list_empty(&vf->op_list_head)); -	return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); -} - -static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, -						struct bnx2x_virtf *vf) -{ -	struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); - -	WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); -	if (vfop) { -		INIT_LIST_HEAD(&vfop->link); -		list_add(&vfop->link, &vf->op_list_head); -	} -	return vfop; -} - -static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, -				  struct bnx2x_vfop *vfop) -{ -	/* rc < 0 - error, otherwise set to 0 */ -	DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); -	if (vfop->rc >= 0) -		vfop->rc = 0; -	DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); - -	/* unlink the current op context and propagate error code -	 * must be done before invoking the 'done()' handler -	 */ -	WARN(!mutex_is_locked(&vf->op_mutex), -	     "about to access vf op linked list but mutex was not locked!"); -	list_del(&vfop->link); - -	if (list_empty(&vf->op_list_head)) { -		DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); -		vf->op_rc = vfop->rc; -		DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d,  vfop->rc %d\n", -		   vf->op_rc, vfop->rc); -	} else { -		struct bnx2x_vfop *cur_vfop; - -		DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); -		cur_vfop = bnx2x_vfop_cur(bp, vf); -		cur_vfop->rc = vfop->rc; -		DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", -		   vf->op_rc, vfop->rc); -	} - -	/* invoke done handler */ -	if (vfop->done) { -		DP(BNX2X_MSG_IOV, "calling done handler\n"); 
-		vfop->done(bp, vf); -	} else { -		/* there is no done handler for the operation to unlock -		 * the mutex. Must have gotten here from PF initiated VF RELEASE -		 */ -		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); -	} - -	DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", -	   vf->op_rc, vfop->rc); - -	/* if this is the last nested op reset the wait_blocking flag -	 * to release any blocking wrappers, only after 'done()' is invoked -	 */ -	if (list_empty(&vf->op_list_head)) { -		DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); -		vf->op_wait_blocking = false; -	} - -	kfree(vfop); -} - -static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, -					   struct bnx2x_virtf *vf) -{ -	/* can take a while if any port is running */ -	int cnt = 5000; - -	might_sleep(); -	while (cnt--) { -		if (vf->op_wait_blocking == false) { -#ifdef BNX2X_STOP_ON_ERROR -			DP(BNX2X_MSG_IOV, "exit  (cnt %d)\n", 5000 - cnt); -#endif -			return 0; -		} -		usleep_range(1000, 2000); - -		if (bp->panic) -			return -EIO; -	} - -	/* timeout! */ -#ifdef BNX2X_STOP_ON_ERROR -	bnx2x_panic(); -#endif - -	return -EBUSY; -} - -static inline int bnx2x_vfop_transition(struct bnx2x *bp, -					struct bnx2x_virtf *vf, -					vfop_handler_t transition, -					bool block) -{ -	if (block) -		vf->op_wait_blocking = true; -	transition(bp, vf); -	if (block) -		return bnx2x_vfop_wait_blocking(bp, vf); -	return 0; -} -  /* VFOP queue construction helpers */  void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,  			    struct bnx2x_queue_init_params *init_params, @@ -655,64 +418,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,  void bnx2x_vfop_qctor_prep(struct bnx2x *bp,  			   struct bnx2x_virtf *vf,  			   struct bnx2x_vf_queue *q, -			   struct bnx2x_vfop_qctor_params *p, +			   struct bnx2x_vf_queue_construct_params *p,  			   unsigned long q_type); -int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, -			    struct bnx2x_virtf *vf, -			    struct bnx2x_vfop_cmd *cmd, -			    struct bnx2x_vfop_filters *macs, -			    int qid, bool drv_only); - -int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, -			    struct bnx2x_virtf *vf, -			    struct bnx2x_vfop_cmd *cmd, -			    int qid, u16 vid, bool add); - -int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, -			     struct bnx2x_virtf *vf, -			     struct bnx2x_vfop_cmd *cmd, -			     struct bnx2x_vfop_filters *vlans, -			     int qid, bool drv_only); - -int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, -			  struct bnx2x_virtf *vf, -			  struct bnx2x_vfop_cmd *cmd, -			  int qid); - -int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, -			 struct bnx2x_virtf *vf, -			 struct bnx2x_vfop_cmd *cmd, -			 int qid); - -int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, -			 struct bnx2x_virtf *vf, -			 struct bnx2x_vfop_cmd *cmd, -			 bnx2x_mac_addr_t *mcasts, -			 int mcast_num, bool drv_only); - -int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, -			  struct bnx2x_virtf *vf, -			  struct bnx2x_vfop_cmd *cmd, -			  int qid, unsigned long accept_flags); - -int bnx2x_vfop_close_cmd(struct bnx2x *bp, -			 struct bnx2x_virtf *vf, -			 struct bnx2x_vfop_cmd *cmd); - -int bnx2x_vfop_release_cmd(struct bnx2x *bp, -			   struct bnx2x_virtf *vf, -			   struct bnx2x_vfop_cmd *cmd); -int bnx2x_vfop_rss_cmd(struct bnx2x *bp, -		       struct bnx2x_virtf *vf, -		       struct bnx2x_vfop_cmd *cmd); +int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, +				  struct bnx2x_vf_mac_vlan_filters *filters, +				  int qid, bool 
drv_only); + +int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, +			 struct bnx2x_vf_queue_construct_params *qctor); + +int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid); + +int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, +		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only); + +int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, +		    int qid, unsigned long accept_flags); + +int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf); + +int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, +			struct bnx2x_config_rss_params *rss); + +int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, +			struct vfpf_tpa_tlv *tlv, +			struct bnx2x_queue_update_tpa_params *params);  /* VF release ~ VF close + VF release-resources   *   * Release is the ultimate SW shutdown and is called whenever an   * irrecoverable error is encountered.   */ -void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); +int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);  int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);  u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); @@ -725,13 +465,6 @@ void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);  /* Handles an FLR (or VF_DISABLE) notification form the MCP */  void bnx2x_vf_handle_flr_event(struct bnx2x *bp); -void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, -		   u16 length); -void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, -		     u16 type, u16 length); -void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv); -void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); -  bool bnx2x_tlv_supported(u16 tlvtype);  u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, @@ -748,7 +481,6 @@ int bnx2x_vfpf_init(struct bnx2x *bp);  void bnx2x_vfpf_close_vf(struct bnx2x *bp);  int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,  		       bool is_leading); -int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);  int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);  int bnx2x_vfpf_config_rss(struct bnx2x *bp,  			  struct bnx2x_config_rss_params *params); @@ -772,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,  enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);  void bnx2x_timer_sriov(struct bnx2x *bp);  void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); +void bnx2x_vf_pci_dealloc(struct bnx2x *bp);  int bnx2x_vf_pci_alloc(struct bnx2x *bp);  int bnx2x_enable_sriov(struct bnx2x *bp);  void bnx2x_disable_sriov(struct bnx2x *bp); @@ -782,20 +515,21 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)  void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);  int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);  void bnx2x_iov_channel_down(struct bnx2x *bp); -int bnx2x_open_epilog(struct bnx2x *bp); + +void bnx2x_iov_task(struct work_struct *work); + +void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);  #else /* CONFIG_BNX2X_SRIOV */  static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,  				struct bnx2x_queue_sp_obj **q_obj) {} -static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, -				      bool queue_work) {}  static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}  static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,  				
	union event_ring_elem *elem) {return 1; } -static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} -static inline void bnx2x_vf_mbx(struct bnx2x *bp, -				struct vf_pf_event_data *vfpf_event) {} +static inline void bnx2x_vf_mbx(struct bnx2x *bp) {} +static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp, +					 struct vf_pf_event_data *vfpf_event) {}  static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }  static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}  static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } @@ -813,7 +547,6 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }  static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }  static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}  static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } -static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }  static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,  					u8 vf_qid, bool set) {return 0; }  static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, @@ -838,11 +571,14 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)  	return NULL;  } +static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}  static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }  static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}  static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }  static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} -static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; } + +static inline void bnx2x_iov_task(struct work_struct *work) {} +static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}  #endif /* CONFIG_BNX2X_SRIOV */  #endif /* bnx2x_sriov.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 86436c77af0..ca47665f94b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   * UDP CSUM errata workaround by Arik Gendelman @@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)  	} else if (bp->func_stx) {  		*stats_comp = 0; -		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); +		bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);  	}  } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index f35845006cd..2beceaefdee 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -6,7 +6,7 @@   * it under the terms of the GNU General Public License as published by   * the Free Software Foundation.   
* - * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com>   * Written by: Eliezer Tamir   * Based on code from Michael Chan's bnx2 driver   * UDP CSUM errata workaround by Arik Gendelman diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index da16953eb2e..d712d0ddd71 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -12,18 +12,20 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Shmulik Ravid <shmulikr@broadcom.com> - *	       Ariel Elior <ariele@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Shmulik Ravid + *	       Ariel Elior <ariel.elior@qlogic.com>   */  #include "bnx2x.h"  #include "bnx2x_cmn.h"  #include <linux/crc32.h> +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); +  /* place a given tlv on the tlv buffer at a given offset */ -void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type, -		   u16 length) +static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, +			  u16 offset, u16 type, u16 length)  {  	struct channel_tlv *tl =  		(struct channel_tlv *)(tlvs_list + offset); @@ -33,8 +35,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,  }  /* Clear the mailbox and init the header of the first tlv */ -void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, -		     u16 type, u16 length) +static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, +			    u16 type, u16 length)  {  	mutex_lock(&bp->vf2pf_mutex); @@ -52,7 +54,8 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,  }  /* releases the mailbox */ -void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv) +static void bnx2x_vfpf_finalize(struct bnx2x *bp, +				struct vfpf_first_tlv *first_tlv)  {  	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",  	   first_tlv->tl.type); @@ -60,8 +63,32 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)  	mutex_unlock(&bp->vf2pf_mutex);  } +/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */ +static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list, +				   enum channel_tlvs req_tlv) +{ +	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; + +	do { +		if (tlv->type == req_tlv) +			return tlv; + +		if (!tlv->length) { +			BNX2X_ERR("Found TLV with length 0\n"); +			return NULL; +		} + +		tlvs_list += tlv->length; +		tlv = (struct channel_tlv *)tlvs_list; +	} while (tlv->type != CHANNEL_TLV_LIST_END); + +	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv); + +	return NULL; +} +  /* list the types and lengths of the tlvs on the buffer */ -void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) +static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)  {  	int i = 1;  	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list; @@ -128,7 +155,7 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)  	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {  		DP(BNX2X_MSG_IOV, "detecting channel down. 
Aborting message\n");  		*done = PFVF_STATUS_SUCCESS; -		return 0; +		return -EINVAL;  	}  	/* Write message address */ @@ -184,7 +211,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)  		return -EINVAL;  	} -	BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); +	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);  	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; @@ -196,6 +223,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)  	int rc = 0, attempts = 0;  	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;  	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp; +	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;  	u32 vf_id;  	bool resources_acquired = false; @@ -219,8 +247,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)  	/* pf 2 vf bulletin board address */  	req->bulletin_addr = bp->pf2vf_bulletin_mapping; +	/* Request physical port identifier */ +	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, +		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv)); +  	/* add list termination tlv */ -	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, +	bnx2x_add_tlv(bp, req, +		      req->first_tlv.tl.length + sizeof(struct channel_tlv), +		      CHANNEL_TLV_LIST_END,  		      sizeof(struct channel_list_end_tlv));  	/* output tlvs list */ @@ -287,6 +321,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)  		}  	} +	/* Retrieve physical port id (if possible) */ +	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *) +			 bnx2x_search_tlv_list(bp, resp, +					       CHANNEL_TLV_PHYS_PORT_ID); +	if (phys_port_resp) { +		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN); +		bp->flags |= HAS_PHYS_PORT_ID; +	} +  	/* get HW info */  	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);  	bp->link_params.chip_id = bp->common.chip_id; @@ -505,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,  	vf->leading_rss = cl_id;  	q->is_leading = true; +	q->sp_initialized = true;  }  /* ask the pf to open a queue for the vf */ @@ -593,7 +637,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,  	return rc;  } -int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) +static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)  {  	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;  	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; @@ -629,6 +673,7 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)  out:  	bnx2x_vfpf_finalize(bp, &req->first_tlv); +  	return rc;  } @@ -702,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)  out:  	bnx2x_vfpf_finalize(bp, &req->first_tlv); -	return 0; +	return rc;  }  /* request pf to config rss table for vf queues*/ @@ -760,14 +805,18 @@ int bnx2x_vfpf_config_rss(struct bnx2x *bp,  	}  	if (resp->hdr.status != PFVF_STATUS_SUCCESS) { -		BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n", -			  resp->hdr.status); -		rc = -EINVAL; +		/* Since older drivers don't support this feature (and VF has +		 * no way of knowing other than failing this), don't propagate +		 * an error in this case. 
+		 */ +		DP(BNX2X_MSG_IOV, +		   "Failed to send rss message to PF over VF-PF channel [%d]\n", +		   resp->hdr.status);  	}  out:  	bnx2x_vfpf_finalize(bp, &req->first_tlv); -	return 0; +	return rc;  }  int bnx2x_vfpf_set_mcast(struct net_device *dev) @@ -847,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)  	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode); -	switch (mode) { -	case BNX2X_RX_MODE_NONE: /* no Rx */ +	/* Ignore everything accept MODE_NONE */ +	if (mode  == BNX2X_RX_MODE_NONE) {  		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; -		break; -	case BNX2X_RX_MODE_NORMAL: +	} else { +		/* Current PF driver will not look at the specific flags, +		 * but they are required when working with older drivers on hv. +		 */  		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;  		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;  		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; -		break; -	case BNX2X_RX_MODE_ALLMULTI: -		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; -		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; -		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; -		break; -	case BNX2X_RX_MODE_PROMISC: -		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST; -		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST; -		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; -		break; -	default: -		BNX2X_ERR("BAD rx mode (%d)\n", mode); -		rc = -EINVAL; -		goto out;  	}  	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; @@ -890,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)  		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);  		rc = -EINVAL;  	} -out: +  	bnx2x_vfpf_finalize(bp, &req->first_tlv);  	return rc; @@ -980,64 +1016,68 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,  	dmae.len = len32;  	/* issue the command and wait for completion */ -	return bnx2x_issue_dmae_with_comp(bp, &dmae); +	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));  } -static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf) +static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp, +					 struct bnx2x_virtf *vf)  {  	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); -	u64 vf_addr; -	dma_addr_t pf_addr;  	u16 length, type; -	int rc; -	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;  	/* prepare response */  	type = mbx->first_tlv.tl.type;  	length = type == CHANNEL_TLV_ACQUIRE ?  		
sizeof(struct pfvf_acquire_resp_tlv) :  		sizeof(struct pfvf_general_resp_tlv); -	bnx2x_add_tlv(bp, resp, 0, type, length); -	resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc); -	bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END, +	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length); +	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,  		      sizeof(struct channel_list_end_tlv)); +} + +static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp, +				       struct bnx2x_virtf *vf, +				       int vf_rc) +{ +	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index); +	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp; +	dma_addr_t pf_addr; +	u64 vf_addr; +	int rc; +  	bnx2x_dp_tlv_list(bp, resp);  	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",  	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); +	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc); +  	/* send response */  	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +  		  mbx->first_tlv.resp_msg_offset;  	pf_addr = mbx->msg_mapping +  		  offsetof(struct bnx2x_vf_mbx_msg, resp); -	/* copy the response body, if there is one, before the header, as the vf -	 * is sensitive to the header being written +	/* Copy the response buffer. The first u64 is written afterwards, as +	 * the vf is sensitive to the header being written  	 */ -	if (resp->hdr.tl.length > sizeof(u64)) { -		length = resp->hdr.tl.length - sizeof(u64); -		vf_addr += sizeof(u64); -		pf_addr += sizeof(u64); -		rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, -					  U64_HI(vf_addr), -					  U64_LO(vf_addr), -					  length/4); -		if (rc) { -			BNX2X_ERR("Failed to copy response body to VF %d\n", -				  vf->abs_vfid); -			goto mbx_error; -		} -		vf_addr -= sizeof(u64); -		pf_addr -= sizeof(u64); +	vf_addr += sizeof(u64); +	pf_addr += sizeof(u64); +	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid, +				  U64_HI(vf_addr), +				  U64_LO(vf_addr), +				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4); +	if (rc) { +		BNX2X_ERR("Failed to copy response body to VF %d\n", +			  vf->abs_vfid); +		goto mbx_error;  	} +	vf_addr -= sizeof(u64); +	pf_addr -= sizeof(u64);  	/* ack the FW */  	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);  	mmiowb(); -	/* initiate dmae to send the response */ -	mbx->flags &= ~VF_MSG_INPROCESS; -  	/* copy the response header including status-done field,  	 * must be last dmae, must be after FW is acked  	 */ @@ -1057,7 +1097,38 @@ static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)  	return;  mbx_error: -	bnx2x_vf_release(bp, vf, false); /* non blocking */ +	bnx2x_vf_release(bp, vf); +} + +static void bnx2x_vf_mbx_resp(struct bnx2x *bp, +			      struct bnx2x_virtf *vf, +			      int rc) +{ +	bnx2x_vf_mbx_resp_single_tlv(bp, vf); +	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc); +} + +static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp, +					struct bnx2x_virtf *vf, +					void *buffer, +					u16 *offset) +{ +	struct vfpf_port_phys_id_resp_tlv *port_id; + +	if (!(bp->flags & HAS_PHYS_PORT_ID)) +		return; + +	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID, +		      sizeof(struct vfpf_port_phys_id_resp_tlv)); + +	port_id = (struct vfpf_port_phys_id_resp_tlv *) +		  (((u8 *)buffer) + *offset); +	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN); + +	/* Offset should continue representing the offset to the tail +	 * of TLV data (outside this function scope) +	 */ +	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);  }  static void 
bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1067,6 +1138,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,  	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;  	struct pf_vf_resc *resc = &resp->resc;  	u8 status = bnx2x_pfvf_status_codes(vfop_status); +	u16 length;  	memset(resp, 0, sizeof(*resp)); @@ -1075,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,  	resp->pfdev_info.db_size = bp->db_size;  	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;  	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | -				   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); +				   PFVF_CAP_TPA | +				   PFVF_CAP_TPA_UPDATE);  	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,  			  sizeof(resp->pfdev_info.fw_ver)); @@ -1090,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,  			bnx2x_vf_max_queue_cnt(bp, vf);  		resc->num_sbs = vf_sb_count(vf);  		resc->num_mac_filters = vf_mac_rules_cnt(vf); -		resc->num_vlan_filters = vf_vlan_rules_cnt(vf); +		resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);  		resc->num_mc_filters = 0;  		if (status == PFVF_STATUS_SUCCESS) { @@ -1140,9 +1213,23 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,  			resc->hw_sbs[i].sb_qid);  	DP_CONT(BNX2X_MSG_IOV, "]\n"); +	/* prepare response */ +	length = sizeof(struct pfvf_acquire_resp_tlv); +	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length); + +	/* Handle possible VF requests for physical port identifiers. +	 * 'length' should continue to indicate the offset of the first empty +	 * place in the buffer (i.e., where next TLV should be inserted) +	 */ +	if (bnx2x_search_tlv_list(bp, &mbx->msg->req, +				  CHANNEL_TLV_PHYS_PORT_ID)) +		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length); + +	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END, +		      sizeof(struct channel_list_end_tlv)); +  	/* send the response */ -	vf->op_rc = vfop_status; -	bnx2x_vf_mbx_resp(bp, vf); +	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);  }  static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -1174,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,  			      struct bnx2x_vf_mbx *mbx)  {  	struct vfpf_init_tlv *init = &mbx->msg->req.init; +	int rc;  	/* record ghost addresses from vf message */  	vf->spq_map = init->spq_addr;  	vf->fw_stat_map = init->stats_addr;  	vf->stats_stride = init->stats_stride; -	vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); +	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);  	/* set VF multiqueue statistics collection mode */  	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)  		vf->cfg_flags |= VF_CFG_STATS_COALESCE;  	/* response */ -	bnx2x_vf_mbx_resp(bp, vf); +	bnx2x_vf_mbx_resp(bp, vf, rc);  }  /* convert MBX queue-flags to standard SP queue-flags */ @@ -1221,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,  				 struct bnx2x_vf_mbx *mbx)  {  	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vf_mbx_resp, -		.block = false, -	}; +	struct bnx2x_vf_queue_construct_params qctor; +	int rc = 0;  	/* verify vf_qid */  	if (setup_q->vf_qid >= vf_rxq_count(vf)) {  		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",  			  setup_q->vf_qid, vf_rxq_count(vf)); -		vf->op_rc = -EINVAL; +		rc = -EINVAL;  		goto response;  	} 
@@ -1248,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,  			bnx2x_leading_vfq_init(bp, vf, q);  		/* re-init the VF operation context */ -		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); -		setup_p = &vf->op_params.qctor.prep_qsetup; -		init_p =  &vf->op_params.qctor.qstate.params.init; +		memset(&qctor, 0 , +		       sizeof(struct bnx2x_vf_queue_construct_params)); +		setup_p = &qctor.prep_qsetup; +		init_p =  &qctor.qstate.params.init;  		/* activate immediately */  		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags); @@ -1324,48 +1411,46 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,  				setup_q->rxq.cache_line_log;  			rxq_params->sb_cq_index = setup_q->rxq.sb_index; +			/* rx setup - multicast engine */ +			if (bnx2x_vfq_is_leading(q)) { +				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid); + +				rxq_params->mcast_engine_id = mcast_id; +				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); +			} +  			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,  						 q->index, q->sb_idx);  		}  		/* complete the preparations */ -		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type); +		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type); -		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index); -		if (vf->op_rc) +		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor); +		if (rc)  			goto response; -		return;  	}  response: -	bnx2x_vf_mbx_resp(bp, vf); +	bnx2x_vf_mbx_resp(bp, vf, rc);  } -enum bnx2x_vfop_filters_state { -	   BNX2X_VFOP_MBX_Q_FILTERS_MACS, -	   BNX2X_VFOP_MBX_Q_FILTERS_VLANS, -	   BNX2X_VFOP_MBX_Q_FILTERS_RXMODE, -	   BNX2X_VFOP_MBX_Q_FILTERS_MCAST, -	   BNX2X_VFOP_MBX_Q_FILTERS_DONE -}; -  static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,  				     struct bnx2x_virtf *vf,  				     struct vfpf_set_q_filters_tlv *tlv, -				     struct bnx2x_vfop_filters **pfl, +				     struct bnx2x_vf_mac_vlan_filters **pfl,  				     u32 type_flag)  {  	int i, j; -	struct bnx2x_vfop_filters *fl = NULL; +	struct bnx2x_vf_mac_vlan_filters *fl = NULL;  	size_t fsz; -	fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) + -		sizeof(struct bnx2x_vfop_filters); +	fsz = tlv->n_mac_vlan_filters * +	      sizeof(struct bnx2x_vf_mac_vlan_filter) + +	      sizeof(struct bnx2x_vf_mac_vlan_filters);  	fl = kzalloc(fsz, GFP_KERNEL);  	if (!fl)  		return -ENOMEM; -	INIT_LIST_HEAD(&fl->head); -  	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {  		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i]; @@ -1373,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,  			continue;  		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {  			fl->filters[j].mac = msg_filter->mac; -			fl->filters[j].type = BNX2X_VFOP_FILTER_MAC; +			fl->filters[j].type = BNX2X_VF_FILTER_MAC;  		} else {  			fl->filters[j].vid = msg_filter->vlan_tag; -			fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN; +			fl->filters[j].type = BNX2X_VF_FILTER_VLAN;  		}  		fl->filters[j].add =  			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?  			
true : false; -		list_add_tail(&fl->filters[j++].link, &fl->head); +		fl->count++;  	} -	if (list_empty(&fl->head)) +	if (!fl->count)  		kfree(fl);  	else  		*pfl = fl; @@ -1423,176 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,  #define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID  #define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID -static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf) +static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)  { -	int rc; +	int rc = 0;  	struct vfpf_set_q_filters_tlv *msg =  		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; -	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); -	enum bnx2x_vfop_filters_state state = vfop->state; - -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vfop_mbx_qfilters, -		.block = false, -	}; - -	DP(BNX2X_MSG_IOV, "STATE: %d\n", state); +	/* check for any mac/vlan changes */ +	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { +		/* build mac list */ +		struct bnx2x_vf_mac_vlan_filters *fl = NULL; -	if (vfop->rc < 0) -		goto op_err; - -	switch (state) { -	case BNX2X_VFOP_MBX_Q_FILTERS_MACS: -		/* next state */ -		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS; +		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, +					       VFPF_MAC_FILTER); +		if (rc) +			goto op_err; -		/* check for any vlan/mac changes */ -		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { -			/* build mac list */ -			struct bnx2x_vfop_filters *fl = NULL; +		if (fl) { -			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, -							     VFPF_MAC_FILTER); -			if (vfop->rc) +			/* set mac list */ +			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, +							   msg->vf_qid, +							   false); +			if (rc)  				goto op_err; - -			if (fl) { -				/* set mac list */ -				rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl, -							     msg->vf_qid, -							     false); -				if (rc) { -					vfop->rc = rc; -					goto op_err; -				} -				return; -			}  		} -		/* fall through */ -	case BNX2X_VFOP_MBX_Q_FILTERS_VLANS: -		/* next state */ -		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE; +		/* build vlan list */ +		fl = NULL; -		/* check for any vlan/mac changes */ -		if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) { -			/* build vlan list */ -			struct bnx2x_vfop_filters *fl = NULL; - -			vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, -							     VFPF_VLAN_FILTER); -			if (vfop->rc) +		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl, +					       VFPF_VLAN_FILTER); +		if (rc) +			goto op_err; + +		if (fl) { +			/* set vlan list */ +			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, +							   msg->vf_qid, +							   false); +			if (rc)  				goto op_err; - -			if (fl) { -				/* set vlan list */ -				rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl, -							      msg->vf_qid, -							      false); -				if (rc) { -					vfop->rc = rc; -					goto op_err; -				} -				return; -			}  		} -		/* fall through */ - -	case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE: -		/* next state */ -		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST; - -		if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { -			unsigned long accept = 0; - -			/* covert VF-PF if mask to bnx2x accept flags */ -			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) -				__set_bit(BNX2X_ACCEPT_UNICAST, &accept); - -			if (msg->rx_mask & -					VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST) -				__set_bit(BNX2X_ACCEPT_MULTICAST, &accept); - -			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST) -				__set_bit(BNX2X_ACCEPT_ALL_UNICAST, 
&accept); +	} -			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST) -				__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept); +	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { +		unsigned long accept = 0; +		struct pf_vf_bulletin_content *bulletin = +					BP_VF_BULLETIN(bp, vf->index); -			if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST) -				__set_bit(BNX2X_ACCEPT_BROADCAST, &accept); +		/* Ignore VF requested mode; instead set a regular mode */ +		if (msg->rx_mask !=  VFPF_RX_MASK_ACCEPT_NONE) { +			__set_bit(BNX2X_ACCEPT_UNICAST, &accept); +			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept); +			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept); +		} -			/* A packet arriving the vf's mac should be accepted -			 * with any vlan -			 */ +		/* A packet arriving the vf's mac should be accepted +		 * with any vlan, unless a vlan has already been +		 * configured. +		 */ +		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))  			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); -			/* set rx-mode */ -			rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, -						   msg->vf_qid, accept); -			if (rc) { -				vfop->rc = rc; -				goto op_err; -			} -			return; -		} -		/* fall through */ - -	case BNX2X_VFOP_MBX_Q_FILTERS_MCAST: -		/* next state */ -		vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE; - -		if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { -			/* set mcasts */ -			rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast, -						  msg->n_multicast, false); -			if (rc) { -				vfop->rc = rc; -				goto op_err; -			} -			return; -		} -		/* fall through */ -op_done: -	case BNX2X_VFOP_MBX_Q_FILTERS_DONE: -		bnx2x_vfop_end(bp, vf, vfop); -		return; -op_err: -	BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", -		  vf->abs_vfid, msg->vf_qid, vfop->rc); -	goto op_done; - -	default: -		bnx2x_vfop_default(state); +		/* set rx-mode */ +		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept); +		if (rc) +			goto op_err;  	} -} -static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp, -					struct bnx2x_virtf *vf, -					struct bnx2x_vfop_cmd *cmd) -{ -	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); -	if (vfop) { -		bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS, -				 bnx2x_vfop_mbx_qfilters, cmd->done); -		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters, -					     cmd->block); +	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) { +		/* set mcasts */ +		rc = bnx2x_vf_mcast(bp, vf, msg->multicast, +				    msg->n_multicast, false); +		if (rc) +			goto op_err;  	} -	return -ENOMEM; +op_err: +	if (rc) +		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n", +			  vf->abs_vfid, msg->vf_qid, rc); +	return rc;  } -static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, -				       struct bnx2x_virtf *vf, -				       struct bnx2x_vf_mbx *mbx) +static int bnx2x_filters_validate_mac(struct bnx2x *bp, +				      struct bnx2x_virtf *vf, +				      struct vfpf_set_q_filters_tlv *filters)  { -	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;  	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vf_mbx_resp, -		.block = false, -	}; +	int rc = 0;  	/* if a mac was already set for this VF via the set vf mac ndo, we only  	 * accept mac configurations of that mac. Why accept them at all? 
@@ -1604,23 +1609,71 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,  		if (filters->n_mac_vlan_filters > 1) {  			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",  				  vf->abs_vfid); -			vf->op_rc = -EPERM; +			rc = -EPERM;  			goto response;  		}  		/* ...and only the mac set by the ndo */  		if (filters->n_mac_vlan_filters == 1 && -		    memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) { +		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {  			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",  				  vf->abs_vfid); -			vf->op_rc = -EPERM; +			rc = -EPERM;  			goto response;  		}  	} +response: +	return rc; +} + +static int bnx2x_filters_validate_vlan(struct bnx2x *bp, +				       struct bnx2x_virtf *vf, +				       struct vfpf_set_q_filters_tlv *filters) +{ +	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); +	int rc = 0; + +	/* if vlan was set by hypervisor we don't allow guest to config vlan */ +	if (bulletin->valid_bitmap & 1 << VLAN_VALID) { +		int i; + +		/* search for vlan filters */ +		for (i = 0; i < filters->n_mac_vlan_filters; i++) { +			if (filters->filters[i].flags & +			    VFPF_Q_FILTER_VLAN_TAG_VALID) { +				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", +					  vf->abs_vfid); +				rc = -EPERM; +				goto response; +			} +		} +	} +  	/* verify vf_qid */ -	if (filters->vf_qid > vf_rxq_count(vf)) +	if (filters->vf_qid > vf_rxq_count(vf)) { +		rc = -EPERM; +		goto response; +	} + +response: +	return rc; +} + +static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, +				       struct bnx2x_virtf *vf, +				       struct bnx2x_vf_mbx *mbx) +{ +	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; +	int rc; + +	rc = bnx2x_filters_validate_mac(bp, vf, filters); +	if (rc) +		goto response; + +	rc = bnx2x_filters_validate_vlan(bp, vf, filters); +	if (rc)  		goto response;  	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", @@ -1630,122 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,  	/* print q_filter message */  	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters); -	vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd); -	if (vf->op_rc) -		goto response; -	return; - +	rc = bnx2x_vf_mbx_qfilters(bp, vf);  response: -	bnx2x_vf_mbx_resp(bp, vf); +	bnx2x_vf_mbx_resp(bp, vf, rc);  }  static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,  				    struct bnx2x_vf_mbx *mbx)  {  	int qid = mbx->msg->req.q_op.vf_qid; -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vf_mbx_resp, -		.block = false, -	}; +	int rc;  	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",  	   vf->abs_vfid, qid); -	vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid); -	if (vf->op_rc) -		bnx2x_vf_mbx_resp(bp, vf); +	rc = bnx2x_vf_queue_teardown(bp, vf, qid); +	bnx2x_vf_mbx_resp(bp, vf, rc);  }  static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,  				  struct bnx2x_vf_mbx *mbx)  { -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vf_mbx_resp, -		.block = false, -	}; +	int rc;  	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid); -	vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); -	if (vf->op_rc) -		bnx2x_vf_mbx_resp(bp, vf); +	rc = bnx2x_vf_close(bp, vf); +	bnx2x_vf_mbx_resp(bp, vf, rc);  }  static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,  				    struct bnx2x_vf_mbx *mbx)  { -	
struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vf_mbx_resp, -		.block = false, -	}; +	int rc;  	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid); -	vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); -	if (vf->op_rc) -		bnx2x_vf_mbx_resp(bp, vf); +	rc = bnx2x_vf_free(bp, vf); +	bnx2x_vf_mbx_resp(bp, vf, rc);  }  static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,  				    struct bnx2x_vf_mbx *mbx)  { -	struct bnx2x_vfop_cmd cmd = { -		.done = bnx2x_vf_mbx_resp, -		.block = false, -	}; -	struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; +	struct bnx2x_config_rss_params rss;  	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; +	int rc = 0;  	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||  	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {  		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",  			  vf->index); -		vf->op_rc = -EINVAL; +		rc = -EINVAL;  		goto mbx_resp;  	} +	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params)); +  	/* set vfop params according to rss tlv */ -	memcpy(vf_op_params->ind_table, rss_tlv->ind_table, +	memcpy(rss.ind_table, rss_tlv->ind_table,  	       T_ETH_INDIRECTION_TABLE_SIZE); -	memcpy(vf_op_params->rss_key, rss_tlv->rss_key, -	       sizeof(rss_tlv->rss_key)); -	vf_op_params->rss_obj = &vf->rss_conf_obj; -	vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; +	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key)); +	rss.rss_obj = &vf->rss_conf_obj; +	rss.rss_result_mask = rss_tlv->rss_result_mask;  	/* flags handled individually for backward/forward compatability */ +	rss.rss_flags = 0; +	rss.ramrod_flags = 0; +  	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) -		__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) -		__set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) -		__set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_IPV4) -		__set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) -		__set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) -		__set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_IPV6) -		__set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) -		__set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);  	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) -		__set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); +		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);  	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&  	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||  	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&  	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {  		BNX2X_ERR("about to hit a FW assert. 
aborting...\n"); -		vf->op_rc = -EINVAL; +		rc = -EINVAL;  		goto mbx_resp;  	} -	vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); +	rc = bnx2x_vf_rss_update(bp, vf, &rss); +mbx_resp: +	bnx2x_vf_mbx_resp(bp, vf, rc); +} + +static int bnx2x_validate_tpa_params(struct bnx2x *bp, +				       struct vfpf_tpa_tlv *tpa_tlv) +{ +	int rc = 0; + +	if (tpa_tlv->tpa_client_info.max_sges_for_packet > +	    U_ETH_MAX_SGES_FOR_PACKET) { +		rc = -EINVAL; +		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n", +			  tpa_tlv->tpa_client_info.max_sges_for_packet, +			  U_ETH_MAX_SGES_FOR_PACKET); +	} + +	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) { +		rc = -EINVAL; +		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n", +			  tpa_tlv->tpa_client_info.max_tpa_queues, +			  MAX_AGG_QS(bp)); +	} + +	return rc; +} + +static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf, +				    struct bnx2x_vf_mbx *mbx) +{ +	struct bnx2x_queue_update_tpa_params vf_op_params; +	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; +	int rc = 0; + +	memset(&vf_op_params, 0, sizeof(vf_op_params)); + +	if (bnx2x_validate_tpa_params(bp, tpa_tlv)) +		goto mbx_resp; + +	vf_op_params.complete_on_both_clients = +		tpa_tlv->tpa_client_info.complete_on_both_clients; +	vf_op_params.dont_verify_thr = +		tpa_tlv->tpa_client_info.dont_verify_thr; +	vf_op_params.max_agg_sz = +		tpa_tlv->tpa_client_info.max_agg_size; +	vf_op_params.max_sges_pkt = +		tpa_tlv->tpa_client_info.max_sges_for_packet; +	vf_op_params.max_tpa_queues = +		tpa_tlv->tpa_client_info.max_tpa_queues; +	vf_op_params.sge_buff_sz = +		tpa_tlv->tpa_client_info.sge_buff_size; +	vf_op_params.sge_pause_thr_high = +		tpa_tlv->tpa_client_info.sge_pause_thr_high; +	vf_op_params.sge_pause_thr_low = +		tpa_tlv->tpa_client_info.sge_pause_thr_low; +	vf_op_params.tpa_mode = +		tpa_tlv->tpa_client_info.tpa_mode; +	vf_op_params.update_ipv4 = +		tpa_tlv->tpa_client_info.update_ipv4; +	vf_op_params.update_ipv6 = +		tpa_tlv->tpa_client_info.update_ipv6; + +	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);  mbx_resp: -	if (vf->op_rc) -		bnx2x_vf_mbx_resp(bp, vf); +	bnx2x_vf_mbx_resp(bp, vf, rc);  }  /* dispatch request */ @@ -1787,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,  		case CHANNEL_TLV_UPDATE_RSS:  			bnx2x_vf_mbx_update_rss(bp, vf, mbx);  			return; +		case CHANNEL_TLV_UPDATE_TPA: +			bnx2x_vf_mbx_update_tpa(bp, vf, mbx); +			return;  		}  	} else { @@ -1806,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,  	/* can we respond to VF (do we have an address for it?) 
*/  	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { -		/* mbx_resp uses the op_rc of the VF */ -		vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; -  		/* notify the VF that we do not support this request */ -		bnx2x_vf_mbx_resp(bp, vf); +		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);  	} else {  		/* can't send a response since this VF is unknown to us  		 * just ack the FW to release the mailbox and unlock @@ -1823,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,  	}  } -/* handle new vf-pf message */ -void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event) +void bnx2x_vf_mbx_schedule(struct bnx2x *bp, +			   struct vf_pf_event_data *vfpf_event)  { -	struct bnx2x_virtf *vf; -	struct bnx2x_vf_mbx *mbx;  	u8 vf_idx; -	int rc;  	DP(BNX2X_MSG_IOV,  	   "vf pf event received: vfid %d, address_hi %x, address lo %x", @@ -1841,47 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)  	    BNX2X_NR_VIRTFN(bp)) {  		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",  			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp)); -		goto mbx_done; +		return;  	} +  	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id); -	mbx = BP_VF_MBX(bp, vf_idx); -	/* verify an event is not currently being processed - -	 * debug failsafe only -	 */ -	if (mbx->flags & VF_MSG_INPROCESS) { -		BNX2X_ERR("Previous message is still being processed, vf_id %d\n", -			  vfpf_event->vf_id); -		goto mbx_done; -	} -	vf = BP_VF(bp, vf_idx); +	/* Update VFDB with current message and schedule its handling */ +	mutex_lock(&BP_VFDB(bp)->event_mutex); +	BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; +	BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; +	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); +	mutex_unlock(&BP_VFDB(bp)->event_mutex); -	/* save the VF message address */ -	mbx->vf_addr_hi = vfpf_event->msg_addr_hi; -	mbx->vf_addr_lo = vfpf_event->msg_addr_lo; -	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n", -	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset); +	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG); +} -	/* dmae to get the VF request */ -	rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid, -				  mbx->vf_addr_hi, mbx->vf_addr_lo, -				  sizeof(union vfpf_tlvs)/4); -	if (rc) { -		BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid); -		goto mbx_error; -	} +/* handle new vf-pf messages */ +void bnx2x_vf_mbx(struct bnx2x *bp) +{ +	struct bnx2x_vfdb *vfdb = BP_VFDB(bp); +	u64 events; +	u8 vf_idx; +	int rc; -	/* process the VF message header */ -	mbx->first_tlv = mbx->msg->req.first_tlv; +	if (!vfdb) +		return; -	/* dispatch the request (will prepare the response) */ -	bnx2x_vf_mbx_request(bp, vf, mbx); -	goto mbx_done; +	mutex_lock(&vfdb->event_mutex); +	events = vfdb->event_occur; +	vfdb->event_occur = 0; +	mutex_unlock(&vfdb->event_mutex); -mbx_error: -	bnx2x_vf_release(bp, vf, false); /* non blocking */ -mbx_done: -	return; +	for_each_vf(bp, vf_idx) { +		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx); +		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + +		/* Handle VFs which have pending events */ +		if (!(events & (1ULL << vf_idx))) +			continue; + +		DP(BNX2X_MSG_IOV, +		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n", +		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo, +		   mbx->first_tlv.resp_msg_offset); + +		/* dmae to get the VF request */ +		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, +					  
vf->abs_vfid, mbx->vf_addr_hi, +					  mbx->vf_addr_lo, +					  sizeof(union vfpf_tlvs)/4); +		if (rc) { +			BNX2X_ERR("Failed to copy request VF %d\n", +				  vf->abs_vfid); +			bnx2x_vf_release(bp, vf); +			return; +		} + +		/* process the VF message header */ +		mbx->first_tlv = mbx->msg->req.first_tlv; + +		/* Clean response buffer to refrain from falsely +		 * seeing chains. +		 */ +		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs)); + +		/* dispatch the request (will prepare the response) */ +		bnx2x_vf_mbx_request(bp, vf, mbx); +	}  }  /* propagate local bulletin board to vf */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index 1179fe06d0c..e21e706762c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -12,8 +12,8 @@   * license other than the GPL, without Broadcom's express prior written   * consent.   * - * Maintained by: Eilon Greenstein <eilong@broadcom.com> - * Written by: Ariel Elior <ariele@broadcom.com> + * Maintained by: Ariel Elior <ariel.elior@qlogic.com> + * Written by: Ariel Elior <ariel.elior@qlogic.com>   */  #ifndef VF_PF_IF_H  #define VF_PF_IF_H @@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {  #define PFVF_CAP_RSS		0x00000001  #define PFVF_CAP_DHC		0x00000002  #define PFVF_CAP_TPA		0x00000004 +#define PFVF_CAP_TPA_UPDATE	0x00000008  		char fw_ver[32];  		u16 db_size;  		u8  indices_per_sb; @@ -188,6 +189,12 @@ struct pfvf_acquire_resp_tlv {  	} resc;  }; +struct vfpf_port_phys_id_resp_tlv { +	struct channel_tlv tl; +	u8 id[ETH_ALEN]; +	u8 padding[2]; +}; +  #define VFPF_INIT_FLG_STATS_COALESCE	(1 << 0) /* when set the VFs queues  						  * stats will be coalesced on  						  * the leading RSS queue @@ -297,6 +304,25 @@ struct vfpf_set_q_filters_tlv {  	u32 rx_mask;	/* see mask constants at the top of the file */  }; +struct vfpf_tpa_tlv { +	struct vfpf_first_tlv	first_tlv; + +	struct vf_pf_tpa_client_info { +		aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF]; +		u8 update_ipv4; +		u8 update_ipv6; +		u8 max_tpa_queues; +		u8 max_sges_for_packet; +		u8 complete_on_both_clients; +		u8 dont_verify_thr; +		u8 tpa_mode; +		u16 sge_buff_size; +		u16 max_agg_size; +		u16 sge_pause_thr_low; +		u16 sge_pause_thr_high; +	} tpa_client_info; +}; +  /* close VF (disable VF) */  struct vfpf_close_tlv {  	struct vfpf_first_tlv   first_tlv; @@ -325,6 +351,7 @@ union vfpf_tlvs {  	struct vfpf_set_q_filters_tlv	set_q_filters;  	struct vfpf_release_tlv		release;  	struct vfpf_rss_tlv		update_rss; +	struct vfpf_tpa_tlv		update_tpa;  	struct channel_list_end_tlv	list_end;  	struct tlv_buffer_size		tlv_buf_size;  }; @@ -398,6 +425,8 @@ enum channel_tlvs {  	CHANNEL_TLV_PF_SET_MAC,  	CHANNEL_TLV_PF_SET_VLAN,  	CHANNEL_TLV_UPDATE_RSS, +	CHANNEL_TLV_PHYS_PORT_ID, +	CHANNEL_TLV_UPDATE_TPA,  	CHANNEL_TLV_MAX  }; diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 99394bd49a1..8244e2b14bb 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -1,6 +1,6 @@  /* cnic.c: Broadcom CNIC core network driver.   
* - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by @@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,  	while (retry < 3) {  		rc = 0;  		rcu_read_lock(); -		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); +		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);  		if (ulp_ops)  			rc = ulp_ops->iscsi_nl_send_msg(  				cp->ulp_handle[CNIC_ULP_ISCSI], @@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,  			csk->vlan_id = path_resp->vlan_id; -			memcpy(csk->ha, path_resp->mac_addr, 6); +			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);  			if (test_bit(SK_F_IPV6, &csk->flags))  				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,  				       sizeof(struct in6_addr)); @@ -436,7 +436,7 @@ static int cnic_offld_prep(struct cnic_sock *csk)  static int cnic_close_prep(struct cnic_sock *csk)  {  	clear_bit(SK_F_CONNECT_START, &csk->flags); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {  		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) @@ -450,7 +450,7 @@ static int cnic_close_prep(struct cnic_sock *csk)  static int cnic_abort_prep(struct cnic_sock *csk)  {  	clear_bit(SK_F_CONNECT_START, &csk->flags); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))  		msleep(1); @@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)  		pr_err("%s: Bad type %d\n", __func__, ulp_type);  		return -EINVAL;  	} + +	if (ulp_type == CNIC_ULP_ISCSI) +		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); +  	mutex_lock(&cnic_lock);  	if (rcu_dereference(cp->ulp_ops[ulp_type])) {  		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); @@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)  	}  	mutex_unlock(&cnic_lock); -	if (ulp_type == CNIC_ULP_ISCSI) -		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); -	else if (ulp_type == CNIC_ULP_FCOE) +	if (ulp_type == CNIC_ULP_FCOE)  		dev->fcoe_cap = NULL;  	synchronize_rcu(); @@ -726,7 +728,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)  	for (i = 0; i < dma->num_pages; i++) {  		if (dma->pg_arr[i]) { -			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE, +			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,  					  dma->pg_arr[i], dma->pg_map_arr[i]);  			dma->pg_arr[i] = NULL;  		} @@ -785,7 +787,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,  	for (i = 0; i < pages; i++) {  		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, -						    BNX2_PAGE_SIZE, +						    CNIC_PAGE_SIZE,  						    &dma->pg_map_arr[i],  						    GFP_ATOMIC);  		if (dma->pg_arr[i] == NULL) @@ -794,8 +796,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,  	if (!use_pg_tbl)  		return 0; -	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) & -			  ~(BNX2_PAGE_SIZE - 1); +	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & +			  ~(CNIC_PAGE_SIZE - 1);  	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,  					&dma->pgtbl_map, GFP_ATOMIC);  	if (dma->pgtbl == NULL) @@ -900,8 +902,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)  	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {  		int i, k, arr_size; -		cp->ctx_blk_size = 
BNX2_PAGE_SIZE; -		cp->cids_per_blk = BNX2_PAGE_SIZE / 128; +		cp->ctx_blk_size = CNIC_PAGE_SIZE; +		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;  		arr_size = BNX2_MAX_CID / cp->cids_per_blk *  			   sizeof(struct cnic_ctx);  		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); @@ -933,7 +935,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)  		for (i = 0; i < cp->ctx_blks; i++) {  			cp->ctx_arr[i].ctx =  				dma_alloc_coherent(&dev->pcidev->dev, -						   BNX2_PAGE_SIZE, +						   CNIC_PAGE_SIZE,  						   &cp->ctx_arr[i].mapping,  						   GFP_KERNEL);  			if (cp->ctx_arr[i].ctx == NULL) @@ -1013,7 +1015,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)  	if (udev->l2_ring)  		return 0; -	udev->l2_ring_size = pages * BNX2_PAGE_SIZE; +	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;  	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,  					   &udev->l2_ring_map,  					   GFP_KERNEL | __GFP_COMP); @@ -1021,7 +1023,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)  		return -ENOMEM;  	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; -	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); +	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);  	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,  					  &udev->l2_buf_map,  					  GFP_KERNEL | __GFP_COMP); @@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)  	struct cnic_local *cp = dev->cnic_priv;  	struct cnic_uio_dev *udev; -	read_lock(&cnic_dev_lock);  	list_for_each_entry(udev, &cnic_udev_list, list) {  		if (udev->pdev == dev->pcidev) {  			udev->dev = dev;  			if (__cnic_alloc_uio_rings(udev, pages)) {  				udev->dev = NULL; -				read_unlock(&cnic_dev_lock);  				return -ENOMEM;  			}  			cp->udev = udev; -			read_unlock(&cnic_dev_lock);  			return 0;  		}  	} -	read_unlock(&cnic_dev_lock);  	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);  	if (!udev) @@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)  	if (__cnic_alloc_uio_rings(udev, pages))  		goto err_udev; -	write_lock(&cnic_dev_lock);  	list_add(&udev->list, &cnic_udev_list); -	write_unlock(&cnic_dev_lock);  	pci_dev_get(udev->pdev); @@ -1102,7 +1098,7 @@ static int cnic_init_uio(struct cnic_dev *dev)  		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +  						     TX_MAX_TSS_RINGS + 1);  		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & -					PAGE_MASK; +					CNIC_PAGE_MASK;  		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)  			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;  		else @@ -1113,7 +1109,7 @@ static int cnic_init_uio(struct cnic_dev *dev)  		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);  		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & -			PAGE_MASK; +			CNIC_PAGE_MASK;  		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);  		uinfo->name = "bnx2x_cnic"; @@ -1267,14 +1263,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)  	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)  		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; -	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / -		PAGE_SIZE; +	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / +		CNIC_PAGE_SIZE;  	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);  	if (ret)  		return -ENOMEM; -	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; +	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;  	for (i = 0, j = 0; i < cp->max_cid_space; i++) {  		long 
off = CNIC_KWQ16_DATA_SIZE * (i % n); @@ -1296,7 +1292,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)  			goto error;  	} -	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; +	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;  	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);  	if (ret)  		goto error; @@ -1466,8 +1462,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)  	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *  			BNX2X_ISCSI_R2TQE_SIZE;  	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; -	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; -	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); +	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; +	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);  	cp->num_cqs = req1->num_cqs;  	if (!dev->max_iscsi_conn) @@ -1477,9 +1473,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)  	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),  		  req1->rq_num_wqes);  	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), -		  PAGE_SIZE); +		  CNIC_PAGE_SIZE);  	CNIC_WR8(dev, BAR_TSTRORM_INTMEM + -		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); +		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);  	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +  		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),  		  req1->num_tasks_per_conn); @@ -1489,9 +1485,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)  		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),  		  req1->rq_buffer_size);  	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), -		  PAGE_SIZE); +		  CNIC_PAGE_SIZE);  	CNIC_WR8(dev, BAR_USTRORM_INTMEM + -		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); +		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);  	CNIC_WR16(dev, BAR_USTRORM_INTMEM +  		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),  		  req1->num_tasks_per_conn); @@ -1504,9 +1500,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)  	/* init Xstorm RAM */  	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), -		  PAGE_SIZE); +		  CNIC_PAGE_SIZE);  	CNIC_WR8(dev, BAR_XSTRORM_INTMEM + -		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); +		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);  	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +  		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),  		  req1->num_tasks_per_conn); @@ -1519,9 +1515,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)  	/* init Cstorm RAM */  	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), -		  PAGE_SIZE); +		  CNIC_PAGE_SIZE);  	CNIC_WR8(dev, BAR_CSTRORM_INTMEM + -		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); +		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);  	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +  		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),  		  req1->num_tasks_per_conn); @@ -1623,18 +1619,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)  	}  	ctx->cid = cid; -	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; +	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;  	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);  	if (ret)  		goto error; -	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; +	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;  	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);  	if (ret)  		goto 
error; -	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; +	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;  	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);  	if (ret)  		goto error; @@ -1760,7 +1756,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],  	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;  	/* TSTORM requires the base address of RQ DB & not PTE */  	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = -		req2->rq_page_table_addr_lo & PAGE_MASK; +		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;  	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =  		req2->rq_page_table_addr_hi;  	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; @@ -1842,7 +1838,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],  	/* CSTORM and USTORM initialization is different, CSTORM requires  	 * CQ DB base & not PTE addr */  	ictx->cstorm_st_context.cq_db_base.lo = -		req1->cq_page_table_addr_lo & PAGE_MASK; +		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;  	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;  	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;  	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; @@ -2911,7 +2907,7 @@ static int cnic_l2_completion(struct cnic_local *cp)  	u16 hw_cons, sw_cons;  	struct cnic_uio_dev *udev = cp->udev;  	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) -					(udev->l2_ring + (2 * BNX2_PAGE_SIZE)); +					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));  	u32 cmd;  	int comp = 0; @@ -3244,7 +3240,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)  	int rc;  	mutex_lock(&cnic_lock); -	ulp_ops = cnic_ulp_tbl_prot(ulp_type); +	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], +					    lockdep_is_held(&cnic_lock));  	if (ulp_ops && ulp_ops->cnic_get_stats)  		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);  	else @@ -3645,7 +3642,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)  	csk_hold(csk);  	clear_bit(SK_F_INUSE, &csk->flags); -	smp_mb__after_clear_bit(); +	smp_mb__after_atomic();  	while (atomic_read(&csk->ref_count) != 1)  		msleep(1);  	cnic_cm_cleanup(csk); @@ -4025,7 +4022,7 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)  			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)  			set_bit(SK_F_HW_ERR, &csk->flags); -		smp_mb__before_clear_bit(); +		smp_mb__before_atomic();  		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);  		cnic_cm_upcall(cp, csk, opcode);  		break; @@ -4384,7 +4381,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)  		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;  		u32 val; -		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE); +		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);  		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,  			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); @@ -4628,7 +4625,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)  		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);  	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); -	rxbd = udev->l2_ring + BNX2_PAGE_SIZE; +	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;  	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {  		dma_addr_t buf_map;  		int n = (i % cp->l2_rx_ring_size) + 1; @@ -4639,11 +4636,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)  		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;  		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;  	} -	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; +	val = (u64) (ring_map + 
CNIC_PAGE_SIZE) >> 32;  	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);  	rxbd->rx_bd_haddr_hi = val; -	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; +	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;  	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);  	rxbd->rx_bd_haddr_lo = val; @@ -4709,10 +4706,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)  	val = CNIC_RD(dev, BNX2_MQ_CONFIG);  	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; -	if (BNX2_PAGE_BITS > 12) +	if (CNIC_PAGE_BITS > 12)  		val |= (12 - 8)  << 4;  	else -		val |= (BNX2_PAGE_BITS - 8)  << 4; +		val |= (CNIC_PAGE_BITS - 8)  << 4;  	CNIC_WR(dev, BNX2_MQ_CONFIG, val); @@ -4742,13 +4739,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)  	/* Initialize the kernel work queue context. */  	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | -	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; +	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;  	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); -	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; +	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;  	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); -	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; +	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;  	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);  	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); @@ -4768,13 +4765,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)  	/* Initialize the kernel complete queue context. */  	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | -	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; +	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;  	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); -	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; +	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;  	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); -	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; +	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;  	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);  	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); @@ -4918,7 +4915,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,  	u32 cli = cp->ethdev->iscsi_l2_client_id;  	u32 val; -	memset(txbd, 0, BNX2_PAGE_SIZE); +	memset(txbd, 0, CNIC_PAGE_SIZE);  	buf_map = udev->l2_buf_map;  	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { @@ -4978,9 +4975,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,  	struct bnx2x *bp = netdev_priv(dev->netdev);  	struct cnic_uio_dev *udev = cp->udev;  	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + -				BNX2_PAGE_SIZE); +				CNIC_PAGE_SIZE);  	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) -				(udev->l2_ring + (2 * BNX2_PAGE_SIZE)); +				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));  	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;  	int i;  	u32 cli = cp->ethdev->iscsi_l2_client_id; @@ -5004,20 +5001,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,  		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);  	} -	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; +	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;  	rxbd->addr_hi = cpu_to_le32(val);  	data->rx.bd_page_base.hi = cpu_to_le32(val); -	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; +	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;  	rxbd->addr_lo = 
cpu_to_le32(val);  	data->rx.bd_page_base.lo = cpu_to_le32(val);  	rxcqe += BNX2X_MAX_RCQ_DESC_CNT; -	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32; +	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;  	rxcqe->addr_hi = cpu_to_le32(val);  	data->rx.cqe_page_base.hi = cpu_to_le32(val); -	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff; +	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;  	rxcqe->addr_lo = cpu_to_le32(val);  	data->rx.cqe_page_base.lo = cpu_to_le32(val); @@ -5220,6 +5217,7 @@ static void cnic_init_rings(struct cnic_dev *dev)  		cnic_ring_ctl(dev, cid, cli, 1);  		*cid_ptr = cid >> 4;  		*(cid_ptr + 1) = cid * bp->db_size; +		*(cid_ptr + 2) = UIO_USE_TX_DOORBELL;  	}  } @@ -5264,8 +5262,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)  		msleep(10);  	}  	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); -	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE; -	memset(rx_ring, 0, BNX2_PAGE_SIZE); +	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; +	memset(rx_ring, 0, CNIC_PAGE_SIZE);  }  static int cnic_register_netdev(struct cnic_dev *dev) @@ -5572,7 +5570,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)  	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)  		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; -	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); +	memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);  	cp->cnic_ops = &cnic_bnx2x_ops;  	cp->start_hw = cnic_start_bnx2x_hw; @@ -5622,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,  {  	int if_type; -	rcu_read_lock();  	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {  		struct cnic_ulp_ops *ulp_ops;  		void *ctx; -		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); -		if (!ulp_ops || !ulp_ops->indicate_netevent) +		mutex_lock(&cnic_lock); +		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], +						lockdep_is_held(&cnic_lock)); +		if (!ulp_ops || !ulp_ops->indicate_netevent) { +			mutex_unlock(&cnic_lock);  			continue; +		}  		ctx = cp->ulp_handle[if_type]; +		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); +		mutex_unlock(&cnic_lock); +  		ulp_ops->indicate_netevent(ctx, event, vlan_id); + +		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);  	} -	rcu_read_unlock();  }  /* netdev event handler */ diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 0121a5d5519..d535ae4228b 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -1,6 +1,6 @@  /* cnic.h: Broadcom CNIC core network driver.   * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by @@ -186,6 +186,8 @@ struct kcq_info {  	u16		(*hw_idx)(u16);  }; +#define UIO_USE_TX_DOORBELL 0x017855DB +  struct cnic_uio_dev {  	struct uio_info		cnic_uinfo;  	u32			uio_dev; diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index 95a8e4b11c9..dcbca6997e8 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h @@ -1,7 +1,7 @@  /* cnic.c: Broadcom CNIC core network driver.   
* - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 0658b43e148..5f4d5573a73 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -1,6 +1,6 @@  /* cnic_if.h: Broadcom CNIC core network driver.   * - * Copyright (c) 2006-2013 Broadcom Corporation + * Copyright (c) 2006-2014 Broadcom Corporation   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License as published by @@ -14,8 +14,8 @@  #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION	"2.5.18" -#define CNIC_MODULE_RELDATE	"Sept 01, 2013" +#define CNIC_MODULE_VERSION	"2.5.20" +#define CNIC_MODULE_RELDATE	"March 14, 2014"  #define CNIC_ULP_RDMA		0  #define CNIC_ULP_ISCSI		1 @@ -24,6 +24,16 @@  #define MAX_CNIC_ULP_TYPE_EXT	3  #define MAX_CNIC_ULP_TYPE	4 +/* Use CPU native page size up to 16K for cnic ring sizes.  */ +#if (PAGE_SHIFT > 14) +#define CNIC_PAGE_BITS	14 +#else +#define CNIC_PAGE_BITS	PAGE_SHIFT +#endif +#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS)) +#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE) +#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1)) +  struct kwqe {  	u32 kwqe_op_flag; @@ -353,8 +363,8 @@ struct cnic_ulp_ops {  	atomic_t ref_count;  }; -extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); +int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); -extern int cnic_unregister_driver(int ulp_type); +int cnic_unregister_driver(int ulp_type);  #endif diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile new file mode 100644 index 00000000000..31f55a90a19 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_BCMGENET) += genet.o +genet-objs := bcmgenet.o bcmmii.o diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c new file mode 100644 index 00000000000..4e615debe47 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -0,0 +1,2584 @@ +/* + * Broadcom GENET (Gigabit Ethernet) controller driver + * + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
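The CNIC_PAGE_* macros added to cnic_if.h above cap the page size used for cnic ring and DMA sizing at 16K, independent of the CPU page size; this is why the cnic.c hunks replace PAGE_SIZE/PAGE_ALIGN/PAGE_SHIFT (and the earlier BNX2_PAGE_* uses) with their CNIC_PAGE_* counterparts. A minimal sketch of the resulting sizing arithmetic, using a hypothetical helper cnic_pages_for() that is not part of the patch but mirrors expressions such as the one in cnic_alloc_bnx2x_resc():

	/* Hypothetical helper: same arithmetic as the
	 * CNIC_PAGE_ALIGN(size) / CNIC_PAGE_SIZE pattern used throughout cnic.c.
	 */
	static unsigned int cnic_pages_for(unsigned long bytes)
	{
		return CNIC_PAGE_ALIGN(bytes) / CNIC_PAGE_SIZE;
	}

	/* With 4K pages:  CNIC_PAGE_SIZE == 4K,  so cnic_pages_for(10000) == 3.
	 * With 64K pages: CNIC_PAGE_SIZE is capped at 16K, so the same region
	 * takes a single 16K page and the ring page size never exceeds 16K.
	 */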
+ */ + +#define pr_fmt(fmt)				"bcmgenet: " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/interrupt.h> +#include <linux/string.h> +#include <linux/if_ether.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/pm.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <net/arp.h> + +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/phy.h> + +#include <asm/unaligned.h> + +#include "bcmgenet.h" + +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT	4 + +/* Default highest priority queue for multi queue support */ +#define GENET_Q0_PRIORITY	0 + +#define GENET_DEFAULT_BD_CNT	\ +	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt) + +#define RX_BUF_LENGTH		2048 +#define SKB_ALIGNMENT		32 + +/* Tx/Rx DMA register offset, skip 256 descriptors */ +#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd) +#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32)) + +#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \ +				TOTAL_DESC * DMA_DESC_SIZE) + +#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \ +				TOTAL_DESC * DMA_DESC_SIZE) + +static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, +						void __iomem *d, u32 value) +{ +	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS); +} + +static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv, +						void __iomem *d) +{ +	return __raw_readl(d + DMA_DESC_LENGTH_STATUS); +} + +static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, +				    void __iomem *d, +				    dma_addr_t addr) +{ +	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO); + +	/* Register writes to GISB bus can take couple hundred nanoseconds +	 * and are done for each packet, save these expensive writes unless +	 * the platform is explicitely configured for 64-bits/LPAE. +	 */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT +	if (priv->hw_params->flags & GENET_HAS_40BITS) +		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); +#endif +} + +/* Combined address + length/status setter */ +static inline void dmadesc_set(struct bcmgenet_priv *priv, +				void __iomem *d, dma_addr_t addr, u32 val) +{ +	dmadesc_set_length_status(priv, d, val); +	dmadesc_set_addr(priv, d, addr); +} + +static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, +					  void __iomem *d) +{ +	dma_addr_t addr; + +	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO); + +	/* Register writes to GISB bus can take couple hundred nanoseconds +	 * and are done for each packet, save these expensive writes unless +	 * the platform is explicitely configured for 64-bits/LPAE. 
+	 */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT +	if (priv->hw_params->flags & GENET_HAS_40BITS) +		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32; +#endif +	return addr; +} + +#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x" + +#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \ +				NETIF_MSG_LINK) + +static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv) +{ +	if (GENET_IS_V1(priv)) +		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1); +	else +		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL); +} + +static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ +	if (GENET_IS_V1(priv)) +		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1); +	else +		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL); +} + +/* These macros are defined to deal with register map change + * between GENET1.1 and GENET2. Only those currently being used + * by driver are defined. + */ +static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv) +{ +	if (GENET_IS_V1(priv)) +		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1); +	else +		return __raw_readl(priv->base + +				priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val) +{ +	if (GENET_IS_V1(priv)) +		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1); +	else +		__raw_writel(val, priv->base + +				priv->hw_params->tbuf_offset + TBUF_CTRL); +} + +static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv) +{ +	if (GENET_IS_V1(priv)) +		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1); +	else +		return __raw_readl(priv->base + +				priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val) +{ +	if (GENET_IS_V1(priv)) +		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1); +	else +		__raw_writel(val, priv->base + +				priv->hw_params->tbuf_offset + TBUF_BP_MC); +} + +/* RX/TX DMA register accessors */ +enum dma_reg { +	DMA_RING_CFG = 0, +	DMA_CTRL, +	DMA_STATUS, +	DMA_SCB_BURST_SIZE, +	DMA_ARB_CTRL, +	DMA_PRIORITY, +	DMA_RING_PRIORITY, +}; + +static const u8 bcmgenet_dma_regs_v3plus[] = { +	[DMA_RING_CFG]		= 0x00, +	[DMA_CTRL]		= 0x04, +	[DMA_STATUS]		= 0x08, +	[DMA_SCB_BURST_SIZE]	= 0x0C, +	[DMA_ARB_CTRL]		= 0x2C, +	[DMA_PRIORITY]		= 0x30, +	[DMA_RING_PRIORITY]	= 0x38, +}; + +static const u8 bcmgenet_dma_regs_v2[] = { +	[DMA_RING_CFG]		= 0x00, +	[DMA_CTRL]		= 0x04, +	[DMA_STATUS]		= 0x08, +	[DMA_SCB_BURST_SIZE]	= 0x0C, +	[DMA_ARB_CTRL]		= 0x30, +	[DMA_PRIORITY]		= 0x34, +	[DMA_RING_PRIORITY]	= 0x3C, +}; + +static const u8 bcmgenet_dma_regs_v1[] = { +	[DMA_CTRL]		= 0x00, +	[DMA_STATUS]		= 0x04, +	[DMA_SCB_BURST_SIZE]	= 0x0C, +	[DMA_ARB_CTRL]		= 0x30, +	[DMA_PRIORITY]		= 0x34, +	[DMA_RING_PRIORITY]	= 0x3C, +}; + +/* Set at runtime once bcmgenet version is known */ +static const u8 *bcmgenet_dma_regs; + +static inline struct bcmgenet_priv *dev_to_priv(struct device *dev) +{ +	return netdev_priv(dev_get_drvdata(dev)); +} + +static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv, +					enum dma_reg r) +{ +	return __raw_readl(priv->base + GENET_TDMA_REG_OFF + +			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv, +					u32 val, enum dma_reg r) +{ +	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF + +			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv, +					enum dma_reg r) +{ +	return __raw_readl(priv->base + GENET_RDMA_REG_OFF + +			
DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv, +					u32 val, enum dma_reg r) +{ +	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF + +			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]); +} + +/* RDMA/TDMA ring registers and accessors + * we merge the common fields and just prefix with T/D the registers + * having different meaning depending on the direction + */ +enum dma_ring_reg { +	TDMA_READ_PTR = 0, +	RDMA_WRITE_PTR = TDMA_READ_PTR, +	TDMA_READ_PTR_HI, +	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI, +	TDMA_CONS_INDEX, +	RDMA_PROD_INDEX = TDMA_CONS_INDEX, +	TDMA_PROD_INDEX, +	RDMA_CONS_INDEX = TDMA_PROD_INDEX, +	DMA_RING_BUF_SIZE, +	DMA_START_ADDR, +	DMA_START_ADDR_HI, +	DMA_END_ADDR, +	DMA_END_ADDR_HI, +	DMA_MBUF_DONE_THRESH, +	TDMA_FLOW_PERIOD, +	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD, +	TDMA_WRITE_PTR, +	RDMA_READ_PTR = TDMA_WRITE_PTR, +	TDMA_WRITE_PTR_HI, +	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI +}; + +/* GENET v4 supports 40-bits pointer addressing + * for obvious reasons the LO and HI word parts + * are contiguous, but this offsets the other + * registers. + */ +static const u8 genet_dma_ring_regs_v4[] = { +	[TDMA_READ_PTR]			= 0x00, +	[TDMA_READ_PTR_HI]		= 0x04, +	[TDMA_CONS_INDEX]		= 0x08, +	[TDMA_PROD_INDEX]		= 0x0C, +	[DMA_RING_BUF_SIZE]		= 0x10, +	[DMA_START_ADDR]		= 0x14, +	[DMA_START_ADDR_HI]		= 0x18, +	[DMA_END_ADDR]			= 0x1C, +	[DMA_END_ADDR_HI]		= 0x20, +	[DMA_MBUF_DONE_THRESH]		= 0x24, +	[TDMA_FLOW_PERIOD]		= 0x28, +	[TDMA_WRITE_PTR]		= 0x2C, +	[TDMA_WRITE_PTR_HI]		= 0x30, +}; + +static const u8 genet_dma_ring_regs_v123[] = { +	[TDMA_READ_PTR]			= 0x00, +	[TDMA_CONS_INDEX]		= 0x04, +	[TDMA_PROD_INDEX]		= 0x08, +	[DMA_RING_BUF_SIZE]		= 0x0C, +	[DMA_START_ADDR]		= 0x10, +	[DMA_END_ADDR]			= 0x14, +	[DMA_MBUF_DONE_THRESH]		= 0x18, +	[TDMA_FLOW_PERIOD]		= 0x1C, +	[TDMA_WRITE_PTR]		= 0x20, +}; + +/* Set at runtime once GENET version is known */ +static const u8 *genet_dma_ring_regs; + +static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv, +						unsigned int ring, +						enum dma_ring_reg r) +{ +	return __raw_readl(priv->base + GENET_TDMA_REG_OFF + +			(DMA_RING_SIZE * ring) + +			genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv, +						unsigned int ring, +						u32 val, +						enum dma_ring_reg r) +{ +	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF + +			(DMA_RING_SIZE * ring) + +			genet_dma_ring_regs[r]); +} + +static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv, +						unsigned int ring, +						enum dma_ring_reg r) +{ +	return __raw_readl(priv->base + GENET_RDMA_REG_OFF + +			(DMA_RING_SIZE * ring) + +			genet_dma_ring_regs[r]); +} + +static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv, +						unsigned int ring, +						u32 val, +						enum dma_ring_reg r) +{ +	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF + +			(DMA_RING_SIZE * ring) + +			genet_dma_ring_regs[r]); +} + +static int bcmgenet_get_settings(struct net_device *dev, +		struct ethtool_cmd *cmd) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); + +	if (!netif_running(dev)) +		return -EINVAL; + +	if (!priv->phydev) +		return -ENODEV; + +	return phy_ethtool_gset(priv->phydev, cmd); +} + +static int bcmgenet_set_settings(struct net_device *dev, +		struct ethtool_cmd *cmd) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); + +	if (!netif_running(dev)) +		return -EINVAL; + +	if (!priv->phydev) +		return -ENODEV; + +	return 
phy_ethtool_sset(priv->phydev, cmd); +} + +static int bcmgenet_set_rx_csum(struct net_device *dev, +				netdev_features_t wanted) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	u32 rbuf_chk_ctrl; +	bool rx_csum_en; + +	rx_csum_en = !!(wanted & NETIF_F_RXCSUM); + +	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL); + +	/* enable rx checksumming */ +	if (rx_csum_en) +		rbuf_chk_ctrl |= RBUF_RXCHK_EN; +	else +		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; +	priv->desc_rxchk_en = rx_csum_en; + +	/* If UniMAC forwards CRC, we need to skip over it to get +	 * a valid CHK bit to be set in the per-packet status word +	*/ +	if (rx_csum_en && priv->crc_fwd_en) +		rbuf_chk_ctrl |= RBUF_SKIP_FCS; +	else +		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; + +	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); + +	return 0; +} + +static int bcmgenet_set_tx_csum(struct net_device *dev, +				netdev_features_t wanted) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	bool desc_64b_en; +	u32 tbuf_ctrl, rbuf_ctrl; + +	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); +	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); + +	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + +	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ +	if (desc_64b_en) { +		tbuf_ctrl |= RBUF_64B_EN; +		rbuf_ctrl |= RBUF_64B_EN; +	} else { +		tbuf_ctrl &= ~RBUF_64B_EN; +		rbuf_ctrl &= ~RBUF_64B_EN; +	} +	priv->desc_64b_en = desc_64b_en; + +	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); +	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); + +	return 0; +} + +static int bcmgenet_set_features(struct net_device *dev, +		netdev_features_t features) +{ +	netdev_features_t changed = features ^ dev->features; +	netdev_features_t wanted = dev->wanted_features; +	int ret = 0; + +	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) +		ret = bcmgenet_set_tx_csum(dev, wanted); +	if (changed & (NETIF_F_RXCSUM)) +		ret = bcmgenet_set_rx_csum(dev, wanted); + +	return ret; +} + +static u32 bcmgenet_get_msglevel(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); + +	return priv->msg_enable; +} + +static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); + +	priv->msg_enable = level; +} + +/* standard ethtool support functions. 
*/ +enum bcmgenet_stat_type { +	BCMGENET_STAT_NETDEV = -1, +	BCMGENET_STAT_MIB_RX, +	BCMGENET_STAT_MIB_TX, +	BCMGENET_STAT_RUNT, +	BCMGENET_STAT_MISC, +}; + +struct bcmgenet_stats { +	char stat_string[ETH_GSTRING_LEN]; +	int stat_sizeof; +	int stat_offset; +	enum bcmgenet_stat_type type; +	/* reg offset from UMAC base for misc counters */ +	u16 reg_offset; +}; + +#define STAT_NETDEV(m) { \ +	.stat_string = __stringify(m), \ +	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \ +	.stat_offset = offsetof(struct net_device_stats, m), \ +	.type = BCMGENET_STAT_NETDEV, \ +} + +#define STAT_GENET_MIB(str, m, _type) { \ +	.stat_string = str, \ +	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ +	.stat_offset = offsetof(struct bcmgenet_priv, m), \ +	.type = _type, \ +} + +#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) +#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) +#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) + +#define STAT_GENET_MISC(str, m, offset) { \ +	.stat_string = str, \ +	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \ +	.stat_offset = offsetof(struct bcmgenet_priv, m), \ +	.type = BCMGENET_STAT_MISC, \ +	.reg_offset = offset, \ +} + + +/* There is a 0xC gap between the end of RX and beginning of TX stats and then + * between the end of TX stats and the beginning of the RX RUNT + */ +#define BCMGENET_STAT_OFFSET	0xc + +/* Hardware counters must be kept in sync because the order/offset + * is important here (order in structure declaration = order in hardware) + */ +static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { +	/* general stats */ +	STAT_NETDEV(rx_packets), +	STAT_NETDEV(tx_packets), +	STAT_NETDEV(rx_bytes), +	STAT_NETDEV(tx_bytes), +	STAT_NETDEV(rx_errors), +	STAT_NETDEV(tx_errors), +	STAT_NETDEV(rx_dropped), +	STAT_NETDEV(tx_dropped), +	STAT_NETDEV(multicast), +	/* UniMAC RSV counters */ +	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64), +	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127), +	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255), +	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511), +	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023), +	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518), +	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv), +	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047), +	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095), +	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216), +	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt), +	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes), +	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca), +	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca), +	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs), +	STAT_GENET_MIB_RX("rx_control", mib.rx.cf), +	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf), +	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo), +	STAT_GENET_MIB_RX("rx_align", mib.rx.aln), +	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr), +	STAT_GENET_MIB_RX("rx_code", mib.rx.cde), +	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr), +	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr), +	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr), +	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue), +	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok), +	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc), +	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp), +	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc), +	/* UniMAC TSV counters */ +	STAT_GENET_MIB_TX("tx_64_octets", 
mib.tx.pkt_cnt.cnt_64), +	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), +	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), +	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), +	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), +	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), +	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), +	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), +	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), +	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), +	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts), +	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca), +	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca), +	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf), +	STAT_GENET_MIB_TX("tx_control", mib.tx.cf), +	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs), +	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr), +	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf), +	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf), +	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl), +	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl), +	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl), +	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl), +	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg), +	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl), +	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr), +	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes), +	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok), +	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc), +	/* UniMAC RUNT counters */ +	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt), +	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs), +	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align), +	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes), +	/* Misc UniMAC counters */ +	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, +			UMAC_RBUF_OVFL_CNT), +	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), +	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), +}; + +#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats) + +static void bcmgenet_get_drvinfo(struct net_device *dev, +		struct ethtool_drvinfo *info) +{ +	strlcpy(info->driver, "bcmgenet", sizeof(info->driver)); +	strlcpy(info->version, "v2.0", sizeof(info->version)); +	info->n_stats = BCMGENET_STATS_LEN; + +} + +static int bcmgenet_get_sset_count(struct net_device *dev, int string_set) +{ +	switch (string_set) { +	case ETH_SS_STATS: +		return BCMGENET_STATS_LEN; +	default: +		return -EOPNOTSUPP; +	} +} + +static void bcmgenet_get_strings(struct net_device *dev, +				u32 stringset, u8 *data) +{ +	int i; + +	switch (stringset) { +	case ETH_SS_STATS: +		for (i = 0; i < BCMGENET_STATS_LEN; i++) { +			memcpy(data + i * ETH_GSTRING_LEN, +				bcmgenet_gstrings_stats[i].stat_string, +				ETH_GSTRING_LEN); +		} +		break; +	} +} + +static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) +{ +	int i, j = 0; + +	for (i = 0; i < BCMGENET_STATS_LEN; i++) { +		const struct bcmgenet_stats *s; +		u8 offset = 0; +		u32 val = 0; +		char *p; + +		s = &bcmgenet_gstrings_stats[i]; +		switch (s->type) { +		case BCMGENET_STAT_NETDEV: +			continue; +		case BCMGENET_STAT_MIB_RX: +		case BCMGENET_STAT_MIB_TX: +		case BCMGENET_STAT_RUNT: +			if (s->type != BCMGENET_STAT_MIB_RX) +				offset = BCMGENET_STAT_OFFSET; +			val = bcmgenet_umac_readl(priv, UMAC_MIB_START + +								j + offset); +			break; +		case BCMGENET_STAT_MISC: +			val = bcmgenet_umac_readl(priv, s->reg_offset); +			/* clear if 
overflowed */ +			if (val == ~0) +				bcmgenet_umac_writel(priv, 0, s->reg_offset); +			break; +		} + +		j += s->stat_sizeof; +		p = (char *)priv + s->stat_offset; +		*(u32 *)p = val; +	} +} + +static void bcmgenet_get_ethtool_stats(struct net_device *dev, +					struct ethtool_stats *stats, +					u64 *data) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	int i; + +	if (netif_running(dev)) +		bcmgenet_update_mib_counters(priv); + +	for (i = 0; i < BCMGENET_STATS_LEN; i++) { +		const struct bcmgenet_stats *s; +		char *p; + +		s = &bcmgenet_gstrings_stats[i]; +		if (s->type == BCMGENET_STAT_NETDEV) +			p = (char *)&dev->stats; +		else +			p = (char *)priv; +		p += s->stat_offset; +		data[i] = *(u32 *)p; +	} +} + +/* standard ethtool support functions. */ +static struct ethtool_ops bcmgenet_ethtool_ops = { +	.get_strings		= bcmgenet_get_strings, +	.get_sset_count		= bcmgenet_get_sset_count, +	.get_ethtool_stats	= bcmgenet_get_ethtool_stats, +	.get_settings		= bcmgenet_get_settings, +	.set_settings		= bcmgenet_set_settings, +	.get_drvinfo		= bcmgenet_get_drvinfo, +	.get_link		= ethtool_op_get_link, +	.get_msglevel		= bcmgenet_get_msglevel, +	.set_msglevel		= bcmgenet_set_msglevel, +}; + +/* Power down the unimac, based on mode. */ +static void bcmgenet_power_down(struct bcmgenet_priv *priv, +				enum bcmgenet_power_mode mode) +{ +	u32 reg; + +	switch (mode) { +	case GENET_POWER_CABLE_SENSE: +		phy_detach(priv->phydev); +		break; + +	case GENET_POWER_PASSIVE: +		/* Power down LED */ +		bcmgenet_mii_reset(priv->dev); +		if (priv->hw_params->flags & GENET_HAS_EXT) { +			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); +			reg |= (EXT_PWR_DOWN_PHY | +				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); +			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +		} +		break; +	default: +		break; +	} +} + +static void bcmgenet_power_up(struct bcmgenet_priv *priv, +				enum bcmgenet_power_mode mode) +{ +	u32 reg; + +	if (!(priv->hw_params->flags & GENET_HAS_EXT)) +		return; + +	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); + +	switch (mode) { +	case GENET_POWER_PASSIVE: +		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY | +				EXT_PWR_DOWN_BIAS); +		/* fallthrough */ +	case GENET_POWER_CABLE_SENSE: +		/* enable APD */ +		reg |= EXT_PWR_DN_EN_LD; +		break; +	default: +		break; +	} + +	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +	bcmgenet_mii_reset(priv->dev); +} + +/* ioctl handle special commands that are not present in ethtool. 
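The bcmgenet_update_mib_counters() loop above depends on bcmgenet_gstrings_stats being declared in the same order as the hardware MIB block, since the register offset is simply the running byte count j (plus the 0xC BCMGENET_STAT_OFFSET gap for the TSV and RUNT groups). A short worked example, assuming the counters are 32-bit as the u32 reads suggest:

	/* "rx_64_octets" is the first hardware counter (the NETDEV entries are
	 * skipped before j is advanced), so it is read at UMAC_MIB_START + 0;
	 * "rx_65_127_oct" follows at UMAC_MIB_START + 4, and so on.  The first
	 * TSV counter additionally skips the 0xC register gap, i.e. it is read
	 * at UMAC_MIB_START + j + BCMGENET_STAT_OFFSET.
	 */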
*/ +static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	int val = 0; + +	if (!netif_running(dev)) +		return -EINVAL; + +	switch (cmd) { +	case SIOCGMIIPHY: +	case SIOCGMIIREG: +	case SIOCSMIIREG: +		if (!priv->phydev) +			val = -ENODEV; +		else +			val = phy_mii_ioctl(priv->phydev, rq, cmd); +		break; + +	default: +		val = -EINVAL; +		break; +	} + +	return val; +} + +static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, +					 struct bcmgenet_tx_ring *ring) +{ +	struct enet_cb *tx_cb_ptr; + +	tx_cb_ptr = ring->cbs; +	tx_cb_ptr += ring->write_ptr - ring->cb_ptr; +	tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE; +	/* Advancing local write pointer */ +	if (ring->write_ptr == ring->end_ptr) +		ring->write_ptr = ring->cb_ptr; +	else +		ring->write_ptr++; + +	return tx_cb_ptr; +} + +/* Simple helper to free a control block's resources */ +static void bcmgenet_free_cb(struct enet_cb *cb) +{ +	dev_kfree_skb_any(cb->skb); +	cb->skb = NULL; +	dma_unmap_addr_set(cb, dma_addr, 0); +} + +static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv, +						  struct bcmgenet_tx_ring *ring) +{ +	bcmgenet_intrl2_0_writel(priv, +			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, +			INTRL2_CPU_MASK_SET); +} + +static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv, +						 struct bcmgenet_tx_ring *ring) +{ +	bcmgenet_intrl2_0_writel(priv, +			UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE, +			INTRL2_CPU_MASK_CLEAR); +} + +static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv, +						struct bcmgenet_tx_ring *ring) +{ +	bcmgenet_intrl2_1_writel(priv, +			(1 << ring->index), INTRL2_CPU_MASK_CLEAR); +	priv->int1_mask &= ~(1 << ring->index); +} + +static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, +						struct bcmgenet_tx_ring *ring) +{ +	bcmgenet_intrl2_1_writel(priv, +			(1 << ring->index), INTRL2_CPU_MASK_SET); +	priv->int1_mask |= (1 << ring->index); +} + +/* Unlocked version of the reclaim routine */ +static void __bcmgenet_tx_reclaim(struct net_device *dev, +				struct bcmgenet_tx_ring *ring) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	int last_tx_cn, last_c_index, num_tx_bds; +	struct enet_cb *tx_cb_ptr; +	struct netdev_queue *txq; +	unsigned int c_index; + +	/* Compute how many buffers are transmited since last xmit call */ +	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); +	txq = netdev_get_tx_queue(dev, ring->queue); + +	last_c_index = ring->c_index; +	num_tx_bds = ring->size; + +	c_index &= (num_tx_bds - 1); + +	if (c_index >= last_c_index) +		last_tx_cn = c_index - last_c_index; +	else +		last_tx_cn = num_tx_bds - last_c_index + c_index; + +	netif_dbg(priv, tx_done, dev, +			"%s ring=%d index=%d last_tx_cn=%d last_index=%d\n", +			__func__, ring->index, +			c_index, last_tx_cn, last_c_index); + +	/* Reclaim transmitted buffers */ +	while (last_tx_cn-- > 0) { +		tx_cb_ptr = ring->cbs + last_c_index; +		if (tx_cb_ptr->skb) { +			dev->stats.tx_bytes += tx_cb_ptr->skb->len; +			dma_unmap_single(&dev->dev, +					dma_unmap_addr(tx_cb_ptr, dma_addr), +					tx_cb_ptr->skb->len, +					DMA_TO_DEVICE); +			bcmgenet_free_cb(tx_cb_ptr); +		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { +			dev->stats.tx_bytes += +				dma_unmap_len(tx_cb_ptr, dma_len); +			dma_unmap_page(&dev->dev, +					dma_unmap_addr(tx_cb_ptr, dma_addr), +					dma_unmap_len(tx_cb_ptr, dma_len), +					DMA_TO_DEVICE); +		
	dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); +		} +		dev->stats.tx_packets++; +		ring->free_bds += 1; + +		last_c_index++; +		last_c_index &= (num_tx_bds - 1); +	} + +	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) +		ring->int_disable(priv, ring); + +	if (netif_tx_queue_stopped(txq)) +		netif_tx_wake_queue(txq); + +	ring->c_index = c_index; +} + +static void bcmgenet_tx_reclaim(struct net_device *dev, +		struct bcmgenet_tx_ring *ring) +{ +	unsigned long flags; + +	spin_lock_irqsave(&ring->lock, flags); +	__bcmgenet_tx_reclaim(dev, ring); +	spin_unlock_irqrestore(&ring->lock, flags); +} + +static void bcmgenet_tx_reclaim_all(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	int i; + +	if (netif_is_multiqueue(dev)) { +		for (i = 0; i < priv->hw_params->tx_queues; i++) +			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); +	} + +	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); +} + +/* Transmits a single SKB (either head of a fragment or a single SKB) + * caller must hold priv->lock + */ +static int bcmgenet_xmit_single(struct net_device *dev, +				struct sk_buff *skb, +				u16 dma_desc_flags, +				struct bcmgenet_tx_ring *ring) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct device *kdev = &priv->pdev->dev; +	struct enet_cb *tx_cb_ptr; +	unsigned int skb_len; +	dma_addr_t mapping; +	u32 length_status; +	int ret; + +	tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + +	if (unlikely(!tx_cb_ptr)) +		BUG(); + +	tx_cb_ptr->skb = skb; + +	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb); + +	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); +	ret = dma_mapping_error(kdev, mapping); +	if (ret) { +		netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); +		dev_kfree_skb(skb); +		return ret; +	} + +	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); +	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); +	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | +			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | +			DMA_TX_APPEND_CRC; + +	if (skb->ip_summed == CHECKSUM_PARTIAL) +		length_status |= DMA_TX_DO_CSUM; + +	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); + +	/* Decrement total BD count and advance our write pointer */ +	ring->free_bds -= 1; +	ring->prod_index += 1; +	ring->prod_index &= DMA_P_INDEX_MASK; + +	return 0; +} + +/* Transmit a SKB fragement */ +static int bcmgenet_xmit_frag(struct net_device *dev, +				skb_frag_t *frag, +				u16 dma_desc_flags, +				struct bcmgenet_tx_ring *ring) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct device *kdev = &priv->pdev->dev; +	struct enet_cb *tx_cb_ptr; +	dma_addr_t mapping; +	int ret; + +	tx_cb_ptr = bcmgenet_get_txcb(priv, ring); + +	if (unlikely(!tx_cb_ptr)) +		BUG(); +	tx_cb_ptr->skb = NULL; + +	mapping = skb_frag_dma_map(kdev, frag, 0, +		skb_frag_size(frag), DMA_TO_DEVICE); +	ret = dma_mapping_error(kdev, mapping); +	if (ret) { +		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", +				__func__); +		return ret; +	} + +	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); +	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); + +	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, +			(frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | +			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); + + +	ring->free_bds -= 1; +	ring->prod_index += 1; +	ring->prod_index &= DMA_P_INDEX_MASK; + +	return 0; +} + +/* Reallocate the SKB to put enough headroom in front of it and insert + * the transmit checksum offsets in the descriptors + */ +static int 
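The consumer-index arithmetic at the top of __bcmgenet_tx_reclaim() above handles wrap-around of the hardware consumer index. Worked example: with ring->size = 256, ring->c_index = 250 and a raw TDMA_CONS_INDEX that has advanced to 260, the masked c_index is 260 & 255 = 4; since 4 < 250 the second branch gives last_tx_cn = 256 - 250 + 4 = 10, and the loop reclaims descriptors 250..255 followed by 0..3, masking last_c_index with (num_tx_bds - 1) as it walks.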
bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) +{ +	struct status_64 *status = NULL; +	struct sk_buff *new_skb; +	u16 offset; +	u8 ip_proto; +	u16 ip_ver; +	u32 tx_csum_info; + +	if (unlikely(skb_headroom(skb) < sizeof(*status))) { +		/* If 64 byte status block enabled, must make sure skb has +		 * enough headroom for us to insert 64B status block. +		 */ +		new_skb = skb_realloc_headroom(skb, sizeof(*status)); +		dev_kfree_skb(skb); +		if (!new_skb) { +			dev->stats.tx_errors++; +			dev->stats.tx_dropped++; +			return -ENOMEM; +		} +		skb = new_skb; +	} + +	skb_push(skb, sizeof(*status)); +	status = (struct status_64 *)skb->data; + +	if (skb->ip_summed  == CHECKSUM_PARTIAL) { +		ip_ver = htons(skb->protocol); +		switch (ip_ver) { +		case ETH_P_IP: +			ip_proto = ip_hdr(skb)->protocol; +			break; +		case ETH_P_IPV6: +			ip_proto = ipv6_hdr(skb)->nexthdr; +			break; +		default: +			return 0; +		} + +		offset = skb_checksum_start_offset(skb) - sizeof(*status); +		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | +				(offset + skb->csum_offset); + +		/* Set the length valid bit for TCP and UDP and just set +		 * the special UDP flag for IPv4, else just set to 0. +		 */ +		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { +			tx_csum_info |= STATUS_TX_CSUM_LV; +			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) +				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; +		} else +			tx_csum_info = 0; + +		status->tx_csum_info = tx_csum_info; +	} + +	return 0; +} + +static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct bcmgenet_tx_ring *ring = NULL; +	struct netdev_queue *txq; +	unsigned long flags = 0; +	int nr_frags, index; +	u16 dma_desc_flags; +	int ret; +	int i; + +	index = skb_get_queue_mapping(skb); +	/* Mapping strategy: +	 * queue_mapping = 0, unclassified, packet xmited through ring16 +	 * queue_mapping = 1, goes to ring 0. (highest priority queue +	 * queue_mapping = 2, goes to ring 1. +	 * queue_mapping = 3, goes to ring 2. +	 * queue_mapping = 4, goes to ring 3. +	 */ +	if (index == 0) +		index = DESC_INDEX; +	else +		index -= 1; + +	nr_frags = skb_shinfo(skb)->nr_frags; +	ring = &priv->tx_rings[index]; +	txq = netdev_get_tx_queue(dev, ring->queue); + +	spin_lock_irqsave(&ring->lock, flags); +	if (ring->free_bds <= nr_frags + 1) { +		netif_tx_stop_queue(txq); +		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", +				__func__, index, ring->queue); +		ret = NETDEV_TX_BUSY; +		goto out; +	} + +	if (skb_padto(skb, ETH_ZLEN)) { +		ret = NETDEV_TX_OK; +		goto out; +	} + +	/* set the SKB transmit checksum */ +	if (priv->desc_64b_en) { +		ret = bcmgenet_put_tx_csum(dev, skb); +		if (ret) { +			ret = NETDEV_TX_OK; +			goto out; +		} +	} + +	dma_desc_flags = DMA_SOP; +	if (nr_frags == 0) +		dma_desc_flags |= DMA_EOP; + +	/* Transmit single SKB or head of fragment list */ +	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); +	if (ret) { +		ret = NETDEV_TX_OK; +		goto out; +	} + +	/* xmit fragment */ +	for (i = 0; i < nr_frags; i++) { +		ret = bcmgenet_xmit_frag(dev, +				&skb_shinfo(skb)->frags[i], +				(i == nr_frags - 1) ? 
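A worked example of the tx_csum_info word that bcmgenet_put_tx_csum() above places in the 64-byte status block: for a typical untagged TCP/IPv4 frame the transport header starts 34 bytes into the frame (14-byte Ethernet plus 20-byte IP header) and the TCP checksum field sits 16 bytes into the TCP header, so offset = 34, the insertion point is 34 + 16 = 50, and the word becomes (34 << STATUS_TX_CSUM_START_SHIFT) | 50 | STATUS_TX_CSUM_LV. For UDP over IPv4 the STATUS_TX_CSUM_PROTO_UDP bit is set as well; for non-TCP/UDP protocols tx_csum_info stays 0.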
DMA_EOP : 0, ring); +		if (ret) { +			ret = NETDEV_TX_OK; +			goto out; +		} +	} + +	skb_tx_timestamp(skb); + +	/* we kept a software copy of how much we should advance the TDMA +	 * producer index, now write it down to the hardware +	 */ +	bcmgenet_tdma_ring_writel(priv, ring->index, +			ring->prod_index, TDMA_PROD_INDEX); + +	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { +		netif_tx_stop_queue(txq); +		ring->int_enable(priv, ring); +	} + +out: +	spin_unlock_irqrestore(&ring->lock, flags); + +	return ret; +} + + +static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, +				struct enet_cb *cb) +{ +	struct device *kdev = &priv->pdev->dev; +	struct sk_buff *skb; +	dma_addr_t mapping; +	int ret; + +	skb = netdev_alloc_skb(priv->dev, +				priv->rx_buf_len + SKB_ALIGNMENT); +	if (!skb) +		return -ENOMEM; + +	/* a caller did not release this control block */ +	WARN_ON(cb->skb != NULL); +	cb->skb = skb; +	mapping = dma_map_single(kdev, skb->data, +			priv->rx_buf_len, DMA_FROM_DEVICE); +	ret = dma_mapping_error(kdev, mapping); +	if (ret) { +		bcmgenet_free_cb(cb); +		netif_err(priv, rx_err, priv->dev, +				"%s DMA map failed\n", __func__); +		return ret; +	} + +	dma_unmap_addr_set(cb, dma_addr, mapping); +	/* assign packet, prepare descriptor, and advance pointer */ + +	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); + +	/* turn on the newly assigned BD for DMA to use */ +	priv->rx_bd_assign_index++; +	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); + +	priv->rx_bd_assign_ptr = priv->rx_bds + +		(priv->rx_bd_assign_index * DMA_DESC_SIZE); + +	return 0; +} + +/* bcmgenet_desc_rx - descriptor based rx process. + * this could be called from bottom half, or from NAPI polling method. + */ +static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, +				     unsigned int budget) +{ +	struct net_device *dev = priv->dev; +	struct enet_cb *cb; +	struct sk_buff *skb; +	u32 dma_length_status; +	unsigned long dma_flag; +	int len, err; +	unsigned int rxpktprocessed = 0, rxpkttoprocess; +	unsigned int p_index; +	unsigned int chksum_ok = 0; + +	p_index = bcmgenet_rdma_ring_readl(priv, +			DESC_INDEX, RDMA_PROD_INDEX); +	p_index &= DMA_P_INDEX_MASK; + +	if (p_index < priv->rx_c_index) +		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - +			priv->rx_c_index + p_index; +	else +		rxpkttoprocess = p_index - priv->rx_c_index; + +	netif_dbg(priv, rx_status, dev, +		"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess); + +	while ((rxpktprocessed < rxpkttoprocess) && +			(rxpktprocessed < budget)) { + +		/* Unmap the packet contents such that we can use the +		 * RSV from the 64 bytes descriptor when enabled and save +		 * a 32-bits register read +		 */ +		cb = &priv->rx_cbs[priv->rx_read_ptr]; +		skb = cb->skb; +		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), +				priv->rx_buf_len, DMA_FROM_DEVICE); + +		if (!priv->desc_64b_en) { +			dma_length_status = dmadesc_get_length_status(priv, +							priv->rx_bds + +							(priv->rx_read_ptr * +							 DMA_DESC_SIZE)); +		} else { +			struct status_64 *status; +			status = (struct status_64 *)skb->data; +			dma_length_status = status->length_status; +		} + +		/* DMA flags and length are still valid no matter how +		 * we got the Receive Status Vector (64B RSB or register) +		 */ +		dma_flag = dma_length_status & 0xffff; +		len = dma_length_status >> DMA_BUFLENGTH_SHIFT; + +		netif_dbg(priv, rx_status, dev, +			"%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n", +			__func__, p_index, priv->rx_c_index, priv->rx_read_ptr, +			dma_length_status); + +		
rxpktprocessed++; + +		priv->rx_read_ptr++; +		priv->rx_read_ptr &= (priv->num_rx_bds - 1); + +		/* out of memory, just drop packets at the hardware level */ +		if (unlikely(!skb)) { +			dev->stats.rx_dropped++; +			dev->stats.rx_errors++; +			goto refill; +		} + +		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { +			netif_err(priv, rx_status, dev, +					"Droping fragmented packet!\n"); +			dev->stats.rx_dropped++; +			dev->stats.rx_errors++; +			dev_kfree_skb_any(cb->skb); +			cb->skb = NULL; +			goto refill; +		} +		/* report errors */ +		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR | +						DMA_RX_OV | +						DMA_RX_NO | +						DMA_RX_LG | +						DMA_RX_RXER))) { +			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n", +						(unsigned int)dma_flag); +			if (dma_flag & DMA_RX_CRC_ERROR) +				dev->stats.rx_crc_errors++; +			if (dma_flag & DMA_RX_OV) +				dev->stats.rx_over_errors++; +			if (dma_flag & DMA_RX_NO) +				dev->stats.rx_frame_errors++; +			if (dma_flag & DMA_RX_LG) +				dev->stats.rx_length_errors++; +			dev->stats.rx_dropped++; +			dev->stats.rx_errors++; + +			/* discard the packet and advance consumer index.*/ +			dev_kfree_skb_any(cb->skb); +			cb->skb = NULL; +			goto refill; +		} /* error packet */ + +		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && +				priv->desc_rxchk_en; + +		skb_put(skb, len); +		if (priv->desc_64b_en) { +			skb_pull(skb, 64); +			len -= 64; +		} + +		if (likely(chksum_ok)) +			skb->ip_summed = CHECKSUM_UNNECESSARY; + +		/* remove hardware 2bytes added for IP alignment */ +		skb_pull(skb, 2); +		len -= 2; + +		if (priv->crc_fwd_en) { +			skb_trim(skb, len - ETH_FCS_LEN); +			len -= ETH_FCS_LEN; +		} + +		/*Finish setting up the received SKB and send it to the kernel*/ +		skb->protocol = eth_type_trans(skb, priv->dev); +		dev->stats.rx_packets++; +		dev->stats.rx_bytes += len; +		if (dma_flag & DMA_RX_MULT) +			dev->stats.multicast++; + +		/* Notify kernel */ +		napi_gro_receive(&priv->napi, skb); +		cb->skb = NULL; +		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n"); + +		/* refill RX path on the current control block */ +refill: +		err = bcmgenet_rx_refill(priv, cb); +		if (err) +			netif_err(priv, rx_err, dev, "Rx refill failed\n"); +	} + +	return rxpktprocessed; +} + +/* Assign skb to RX DMA descriptor. 
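The pulls and trim at the end of the receive loop above follow from how each receive buffer is laid out once the relevant options are enabled; a sketch reconstructed from those calls (not from hardware documentation):

	[ 64-byte RX status block, only when priv->desc_64b_en ]
	[ 2 bytes of padding from RBUF_ALIGN_2B, keeping the IP header aligned ]
	[ Ethernet frame ]
	[ 4-byte FCS, still attached when priv->crc_fwd_en forwards the CRC ]

which is why the driver does skb_pull(skb, 64), then skb_pull(skb, 2), and finally skb_trim(skb, len - ETH_FCS_LEN), adjusting len at each step.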
*/ +static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv) +{ +	struct enet_cb *cb; +	int ret = 0; +	int i; + +	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__); + +	/* loop here for each buffer needing assign */ +	for (i = 0; i < priv->num_rx_bds; i++) { +		cb = &priv->rx_cbs[priv->rx_bd_assign_index]; +		if (cb->skb) +			continue; + +		ret = bcmgenet_rx_refill(priv, cb); +		if (ret) +			break; + +	} + +	return ret; +} + +static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) +{ +	struct enet_cb *cb; +	int i; + +	for (i = 0; i < priv->num_rx_bds; i++) { +		cb = &priv->rx_cbs[i]; + +		if (dma_unmap_addr(cb, dma_addr)) { +			dma_unmap_single(&priv->dev->dev, +					dma_unmap_addr(cb, dma_addr), +					priv->rx_buf_len, DMA_FROM_DEVICE); +			dma_unmap_addr_set(cb, dma_addr, 0); +		} + +		if (cb->skb) +			bcmgenet_free_cb(cb); +	} +} + +static int reset_umac(struct bcmgenet_priv *priv) +{ +	struct device *kdev = &priv->pdev->dev; +	unsigned int timeout = 0; +	u32 reg; + +	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ +	bcmgenet_rbuf_ctrl_set(priv, 0); +	udelay(10); + +	/* disable MAC while updating its registers */ +	bcmgenet_umac_writel(priv, 0, UMAC_CMD); + +	/* issue soft reset, wait for it to complete */ +	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); +	while (timeout++ < 1000) { +		reg = bcmgenet_umac_readl(priv, UMAC_CMD); +		if (!(reg & CMD_SW_RESET)) +			return 0; + +		udelay(1); +	} + +	if (timeout == 1000) { +		dev_err(kdev, +			"timeout waiting for MAC to come out of resetn\n"); +		return -ETIMEDOUT; +	} + +	return 0; +} + +static int init_umac(struct bcmgenet_priv *priv) +{ +	struct device *kdev = &priv->pdev->dev; +	int ret; +	u32 reg, cpu_mask_clear; + +	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); + +	ret = reset_umac(priv); +	if (ret) +		return ret; + +	bcmgenet_umac_writel(priv, 0, UMAC_CMD); +	/* clear tx/rx counter */ +	bcmgenet_umac_writel(priv, +		MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL); +	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL); + +	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN); + +	/* init rx registers, enable ip header optimization */ +	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL); +	reg |= RBUF_ALIGN_2B; +	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL); + +	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) +		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL); + +	/* Mask all interrupts.*/ +	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); +	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); +	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + +	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; + +	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); + +	/* Monitor cable plug/unpluged event for internal PHY */ +	if (phy_is_internal(priv->phydev)) +		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); +	else if (priv->ext_phy) +		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP); +	else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { +		reg = bcmgenet_bp_mc_get(priv); +		reg |= BIT(priv->hw_params->bp_in_en_shift); + +		/* bp_mask: back pressure mask */ +		if (netif_is_multiqueue(priv->dev)) +			reg |= priv->hw_params->bp_in_mask; +		else +			reg &= ~priv->hw_params->bp_in_mask; +		bcmgenet_bp_mc_set(priv, reg); +	} + +	/* Enable MDIO interrupts on GENET v3+ */ +	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) +		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR; + +	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, +		
INTRL2_CPU_MASK_CLEAR); + +	/* Enable rx/tx engine.*/ +	dev_dbg(kdev, "done init umac\n"); + +	return 0; +} + +/* Initialize all house-keeping variables for a TX ring, along + * with corresponding hardware registers + */ +static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, +				  unsigned int index, unsigned int size, +				  unsigned int write_ptr, unsigned int end_ptr) +{ +	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; +	u32 words_per_bd = WORDS_PER_BD(priv); +	u32 flow_period_val = 0; +	unsigned int first_bd; + +	spin_lock_init(&ring->lock); +	ring->index = index; +	if (index == DESC_INDEX) { +		ring->queue = 0; +		ring->int_enable = bcmgenet_tx_ring16_int_enable; +		ring->int_disable = bcmgenet_tx_ring16_int_disable; +	} else { +		ring->queue = index + 1; +		ring->int_enable = bcmgenet_tx_ring_int_enable; +		ring->int_disable = bcmgenet_tx_ring_int_disable; +	} +	ring->cbs = priv->tx_cbs + write_ptr; +	ring->size = size; +	ring->c_index = 0; +	ring->free_bds = size; +	ring->write_ptr = write_ptr; +	ring->cb_ptr = write_ptr; +	ring->end_ptr = end_ptr - 1; +	ring->prod_index = 0; + +	/* Set flow period for ring != 16 */ +	if (index != DESC_INDEX) +		flow_period_val = ENET_MAX_MTU_SIZE << 16; + +	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); +	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX); +	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); +	/* Disable rate control for now */ +	bcmgenet_tdma_ring_writel(priv, index, flow_period_val, +			TDMA_FLOW_PERIOD); +	/* Unclassified traffic goes to ring 16 */ +	bcmgenet_tdma_ring_writel(priv, index, +			((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), +			DMA_RING_BUF_SIZE); + +	first_bd = write_ptr; + +	/* Set start and end address, read and write pointers */ +	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, +			DMA_START_ADDR); +	bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd, +			TDMA_READ_PTR); +	bcmgenet_tdma_ring_writel(priv, index, first_bd, +			TDMA_WRITE_PTR); +	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, +			DMA_END_ADDR); +} + +/* Initialize a RDMA ring */ +static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, +				  unsigned int index, unsigned int size) +{ +	u32 words_per_bd = WORDS_PER_BD(priv); +	int ret; + +	priv->num_rx_bds = TOTAL_DESC; +	priv->rx_bds = priv->base + priv->hw_params->rdma_offset; +	priv->rx_bd_assign_ptr = priv->rx_bds; +	priv->rx_bd_assign_index = 0; +	priv->rx_c_index = 0; +	priv->rx_read_ptr = 0; +	priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb), +				GFP_KERNEL); +	if (!priv->rx_cbs) +		return -ENOMEM; + +	ret = bcmgenet_alloc_rx_buffers(priv); +	if (ret) { +		kfree(priv->rx_cbs); +		return ret; +	} + +	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR); +	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); +	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); +	bcmgenet_rdma_ring_writel(priv, index, +		((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), +		DMA_RING_BUF_SIZE); +	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR); +	bcmgenet_rdma_ring_writel(priv, index, +		words_per_bd * size - 1, DMA_END_ADDR); +	bcmgenet_rdma_ring_writel(priv, index, +			(DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) | +			DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH); +	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR); + +	return ret; +} + +/* init multi xmit queues, only available for GENET2+ + * the queue is partitioned as follows: + * + * queue 0 - 3 is 
priority based, each one has 32 descriptors, + * with queue 0 being the highest priority queue. + * + * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT + * descriptors: 256 - (number of tx queues * bds per queues) = 128 + * descriptors. + * + * The transmit control block pool is then partitioned as following: + * - tx_cbs[0...127] are for queue 16 + * - tx_ring_cbs[0] points to tx_cbs[128..159] + * - tx_ring_cbs[1] points to tx_cbs[160..191] + * - tx_ring_cbs[2] points to tx_cbs[192..223] + * - tx_ring_cbs[3] points to tx_cbs[224..255] + */ +static void bcmgenet_init_multiq(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	unsigned int i, dma_enable; +	u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0; + +	if (!netif_is_multiqueue(dev)) { +		netdev_warn(dev, "called with non multi queue aware HW\n"); +		return; +	} + +	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); +	dma_enable = dma_ctrl & DMA_EN; +	dma_ctrl &= ~DMA_EN; +	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + +	/* Enable strict priority arbiter mode */ +	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); + +	for (i = 0; i < priv->hw_params->tx_queues; i++) { +		/* first 64 tx_cbs are reserved for default tx queue +		 * (ring 16) +		 */ +		bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt, +					i * priv->hw_params->bds_cnt, +					(i + 1) * priv->hw_params->bds_cnt); + +		/* Configure ring as decriptor ring and setup priority */ +		ring_cfg |= 1 << i; +		dma_priority |= ((GENET_Q0_PRIORITY + i) << +				(GENET_MAX_MQ_CNT + 1) * i); +		dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT); +	} + +	/* Enable rings */ +	reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG); +	reg |= ring_cfg; +	bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG); + +	/* Use configured rings priority and set ring #16 priority */ +	reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY); +	reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20); +	reg |= dma_priority; +	bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY); + +	/* Configure ring as descriptor ring and re-enable DMA if enabled */ +	reg = bcmgenet_tdma_readl(priv, DMA_CTRL); +	reg |= dma_ctrl; +	if (dma_enable) +		reg |= DMA_EN; +	bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ +	int i; + +	/* disable DMA */ +	bcmgenet_rdma_writel(priv, 0, DMA_CTRL); +	bcmgenet_tdma_writel(priv, 0, DMA_CTRL); + +	for (i = 0; i < priv->num_tx_bds; i++) { +		if (priv->tx_cbs[i].skb != NULL) { +			dev_kfree_skb(priv->tx_cbs[i].skb); +			priv->tx_cbs[i].skb = NULL; +		} +	} + +	bcmgenet_free_rx_buffers(priv); +	kfree(priv->rx_cbs); +	kfree(priv->tx_cbs); +} + +/* init_edma: Initialize DMA control register */ +static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +{ +	int ret; + +	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n"); + +	/* by default, enable ring 16 (descriptor based) */ +	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC); +	if (ret) { +		netdev_err(priv->dev, "failed to initialize RX ring\n"); +		return ret; +	} + +	/* init rDma */ +	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + +	/* Init tDma */ +	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE); + +	/* Initialize commont TX ring structures */ +	priv->tx_bds = priv->base + priv->hw_params->tdma_offset; +	priv->num_tx_bds = TOTAL_DESC; +	priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb), +				GFP_KERNEL); +	if (!priv->tx_cbs) { +		bcmgenet_fini_dma(priv); +		return -ENOMEM; +	} + 
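For reference, a minimal standalone sketch (not part of the patch) of the descriptor split that the multi-queue comment above describes, assuming the GENET_V2+ parameters (tx_queues = 4, bds_cnt = 32) and TOTAL_DESC = 256 from bcmgenet.h:

#include <stdio.h>

#define TOTAL_DESC	256	/* from bcmgenet.h */
#define TX_QUEUES	4	/* GENET_V2+ hw_params->tx_queues */
#define BDS_CNT		32	/* GENET_V2+ hw_params->bds_cnt */

int main(void)
{
	unsigned int reserved = TX_QUEUES * BDS_CNT;

	/* Priority rings 0-3 take 4 x 32 = 128 buffer descriptors... */
	printf("priority rings 0-3: %u BDs\n", reserved);
	/* ...and the default descriptor-based ring 16 gets the remainder. */
	printf("default ring 16:    %u BDs\n", TOTAL_DESC - reserved);
	return 0;
}

With these numbers, 128 control blocks go to the four priority rings and the remaining 128 are left for ring 16, matching the "256 - (number of tx queues * bds per queues) = 128" arithmetic in the comment.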
+	/* initialize multi xmit queue */ +	bcmgenet_init_multiq(priv->dev); + +	/* initialize special ring 16 */ +	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT, +			priv->hw_params->tx_queues * priv->hw_params->bds_cnt, +			TOTAL_DESC); + +	return 0; +} + +/* NAPI polling method*/ +static int bcmgenet_poll(struct napi_struct *napi, int budget) +{ +	struct bcmgenet_priv *priv = container_of(napi, +			struct bcmgenet_priv, napi); +	unsigned int work_done; + +	/* tx reclaim */ +	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); + +	work_done = bcmgenet_desc_rx(priv, budget); + +	/* Advancing our consumer index*/ +	priv->rx_c_index += work_done; +	priv->rx_c_index &= DMA_C_INDEX_MASK; +	bcmgenet_rdma_ring_writel(priv, DESC_INDEX, +				priv->rx_c_index, RDMA_CONS_INDEX); +	if (work_done < budget) { +		napi_complete(napi); +		bcmgenet_intrl2_0_writel(priv, +			UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR); +	} + +	return work_done; +} + +/* Interrupt bottom half */ +static void bcmgenet_irq_task(struct work_struct *work) +{ +	struct bcmgenet_priv *priv = container_of( +			work, struct bcmgenet_priv, bcmgenet_irq_work); + +	netif_dbg(priv, intr, priv->dev, "%s\n", __func__); + +	/* Link UP/DOWN event */ +	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && +		(priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) { +		phy_mac_interrupt(priv->phydev, +			priv->irq0_stat & UMAC_IRQ_LINK_UP); +		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN); +	} +} + +/* bcmgenet_isr1: interrupt handler for ring buffer. */ +static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) +{ +	struct bcmgenet_priv *priv = dev_id; +	unsigned int index; + +	/* Save irq status for bottom-half processing. */ +	priv->irq1_stat = +		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & +		~priv->int1_mask; +	/* clear inerrupts*/ +	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); + +	netif_dbg(priv, intr, priv->dev, +		"%s: IRQ=0x%x\n", __func__, priv->irq1_stat); +	/* Check the MBDONE interrupts. +	 * packet is done, reclaim descriptors +	 */ +	if (priv->irq1_stat & 0x0000ffff) { +		index = 0; +		for (index = 0; index < 16; index++) { +			if (priv->irq1_stat & (1 << index)) +				bcmgenet_tx_reclaim(priv->dev, +						&priv->tx_rings[index]); +		} +	} +	return IRQ_HANDLED; +} + +/* bcmgenet_isr0: Handle various interrupts. */ +static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) +{ +	struct bcmgenet_priv *priv = dev_id; + +	/* Save irq status for bottom-half processing. */ +	priv->irq0_stat = +		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) & +		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); +	/* clear inerrupts*/ +	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + +	netif_dbg(priv, intr, priv->dev, +		"IRQ=0x%x\n", priv->irq0_stat); + +	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) { +		/* We use NAPI(software interrupt throttling, if +		 * Rx Descriptor throttling is not used. +		 * Disable interrupt, will be enabled in the poll method. 
+		 */ +		if (likely(napi_schedule_prep(&priv->napi))) { +			bcmgenet_intrl2_0_writel(priv, +				UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET); +			__napi_schedule(&priv->napi); +		} +	} +	if (priv->irq0_stat & +			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { +		/* Tx reclaim */ +		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); +	} +	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | +				UMAC_IRQ_PHY_DET_F | +				UMAC_IRQ_LINK_UP | +				UMAC_IRQ_LINK_DOWN | +				UMAC_IRQ_HFB_SM | +				UMAC_IRQ_HFB_MM | +				UMAC_IRQ_MPD_R)) { +		/* all other interested interrupts handled in bottom half */ +		schedule_work(&priv->bcmgenet_irq_work); +	} + +	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && +		priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { +		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); +		wake_up(&priv->wq); +	} + +	return IRQ_HANDLED; +} + +static void bcmgenet_umac_reset(struct bcmgenet_priv *priv) +{ +	u32 reg; + +	reg = bcmgenet_rbuf_ctrl_get(priv); +	reg |= BIT(1); +	bcmgenet_rbuf_ctrl_set(priv, reg); +	udelay(10); + +	reg &= ~BIT(1); +	bcmgenet_rbuf_ctrl_set(priv, reg); +	udelay(10); +} + +static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv, +				  unsigned char *addr) +{ +	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) | +			(addr[2] << 8) | addr[3], UMAC_MAC0); +	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1); +} + +static int bcmgenet_wol_resume(struct bcmgenet_priv *priv) +{ +	int ret; + +	/* From WOL-enabled suspend, switch to regular clock */ +	clk_disable(priv->clk_wol); +	/* init umac registers to synchronize s/w with h/w */ +	ret = init_umac(priv); +	if (ret) +		return ret; + +	phy_init_hw(priv->phydev); +	/* Speed settings must be restored */ +	bcmgenet_mii_config(priv->dev); + +	return 0; +} + +/* Returns a reusable dma control register value */ +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +{ +	u32 reg; +	u32 dma_ctrl; + +	/* disable DMA */ +	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; +	reg = bcmgenet_tdma_readl(priv, DMA_CTRL); +	reg &= ~dma_ctrl; +	bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + +	reg = bcmgenet_rdma_readl(priv, DMA_CTRL); +	reg &= ~dma_ctrl; +	bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + +	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); +	udelay(10); +	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + +	return dma_ctrl; +} + +static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) +{ +	u32 reg; + +	reg = bcmgenet_rdma_readl(priv, DMA_CTRL); +	reg |= dma_ctrl; +	bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + +	reg = bcmgenet_tdma_readl(priv, DMA_CTRL); +	reg |= dma_ctrl; +	bcmgenet_tdma_writel(priv, reg, DMA_CTRL); +} + +static int bcmgenet_open(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	unsigned long dma_ctrl; +	u32 reg; +	int ret; + +	netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); + +	/* Turn on the clock */ +	if (!IS_ERR(priv->clk)) +		clk_prepare_enable(priv->clk); + +	/* take MAC out of reset */ +	bcmgenet_umac_reset(priv); + +	ret = init_umac(priv); +	if (ret) +		goto err_clk_disable; + +	/* disable ethernet MAC while updating its registers */ +	reg = bcmgenet_umac_readl(priv, UMAC_CMD); +	reg &= ~(CMD_TX_EN | CMD_RX_EN); +	bcmgenet_umac_writel(priv, reg, UMAC_CMD); + +	bcmgenet_set_hw_addr(priv, dev->dev_addr); + +	if (priv->wol_enabled) { +		ret = bcmgenet_wol_resume(priv); +		if (ret) +			return ret; +	} + +	if (phy_is_internal(priv->phydev)) { +		reg = bcmgenet_ext_readl(priv, 
EXT_EXT_PWR_MGMT); +		reg |= EXT_ENERGY_DET_MASK; +		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +	} + +	/* Disable RX/TX DMA and flush TX queues */ +	dma_ctrl = bcmgenet_dma_disable(priv); + +	/* Reinitialize TDMA and RDMA and SW housekeeping */ +	ret = bcmgenet_init_dma(priv); +	if (ret) { +		netdev_err(dev, "failed to initialize DMA\n"); +		goto err_fini_dma; +	} + +	/* Always enable ring 16 - descriptor ring */ +	bcmgenet_enable_dma(priv, dma_ctrl); + +	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, +			dev->name, priv); +	if (ret < 0) { +		netdev_err(dev, "can't request IRQ %d\n", priv->irq0); +		goto err_fini_dma; +	} + +	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED, +				dev->name, priv); +	if (ret < 0) { +		netdev_err(dev, "can't request IRQ %d\n", priv->irq1); +		goto err_irq0; +	} + +	/* Start the network engine */ +	napi_enable(&priv->napi); + +	reg = bcmgenet_umac_readl(priv, UMAC_CMD); +	reg |= (CMD_TX_EN | CMD_RX_EN); +	bcmgenet_umac_writel(priv, reg, UMAC_CMD); + +	/* Make sure we reflect the value of CRC_CMD_FWD */ +	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + +	device_set_wakeup_capable(&dev->dev, 1); + +	if (phy_is_internal(priv->phydev)) +		bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + +	netif_tx_start_all_queues(dev); + +	phy_start(priv->phydev); + +	return 0; + +err_irq0: +	free_irq(priv->irq0, dev); +err_fini_dma: +	bcmgenet_fini_dma(priv); +err_clk_disable: +	if (!IS_ERR(priv->clk)) +		clk_disable_unprepare(priv->clk); +	return ret; +} + +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ +	int ret = 0; +	int timeout = 0; +	u32 reg; + +	/* Disable TDMA to stop add more frames in TX DMA */ +	reg = bcmgenet_tdma_readl(priv, DMA_CTRL); +	reg &= ~DMA_EN; +	bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + +	/* Check TDMA status register to confirm TDMA is disabled */ +	while (timeout++ < DMA_TIMEOUT_VAL) { +		reg = bcmgenet_tdma_readl(priv, DMA_STATUS); +		if (reg & DMA_DISABLED) +			break; + +		udelay(1); +	} + +	if (timeout == DMA_TIMEOUT_VAL) { +		netdev_warn(priv->dev, +			"Timed out while disabling TX DMA\n"); +		ret = -ETIMEDOUT; +	} + +	/* Wait 10ms for packet drain in both tx and rx dma */ +	usleep_range(10000, 20000); + +	/* Disable RDMA */ +	reg = bcmgenet_rdma_readl(priv, DMA_CTRL); +	reg &= ~DMA_EN; +	bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + +	timeout = 0; +	/* Check RDMA status register to confirm RDMA is disabled */ +	while (timeout++ < DMA_TIMEOUT_VAL) { +		reg = bcmgenet_rdma_readl(priv, DMA_STATUS); +		if (reg & DMA_DISABLED) +			break; + +		udelay(1); +	} + +	if (timeout == DMA_TIMEOUT_VAL) { +		netdev_warn(priv->dev, +			"Timed out while disabling RX DMA\n"); +			ret = -ETIMEDOUT; +	} + +	return ret; +} + +static int bcmgenet_close(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	int ret; +	u32 reg; + +	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); + +	phy_stop(priv->phydev); + +	/* Disable MAC receive */ +	reg = bcmgenet_umac_readl(priv, UMAC_CMD); +	reg &= ~CMD_RX_EN; +	bcmgenet_umac_writel(priv, reg, UMAC_CMD); + +	netif_tx_stop_all_queues(dev); + +	ret = bcmgenet_dma_teardown(priv); +	if (ret) +		return ret; + +	/* Disable MAC transmit. 
TX DMA must be disabled before this */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	reg &= ~CMD_TX_EN;
+	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+	napi_disable(&priv->napi);
+
+	/* tx reclaim */
+	bcmgenet_tx_reclaim_all(dev);
+	bcmgenet_fini_dma(priv);
+
+	free_irq(priv->irq0, priv);
+	free_irq(priv->irq1, priv);
+
+	/* Wait for pending work items to complete - we are stopping
+	 * the clock now. Since interrupts are disabled, no new work
+	 * will be scheduled.
+	 */
+	cancel_work_sync(&priv->bcmgenet_irq_work);
+
+	if (phy_is_internal(priv->phydev))
+		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+	if (priv->wol_enabled)
+		clk_enable(priv->clk_wol);
+
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+	dev->trans_start = jiffies;
+
+	dev->stats.tx_errors++;
+
+	netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT	16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+					 unsigned char *addr,
+					 int *i,
+					 int *mc)
+{
+	u32 reg;
+
+	bcmgenet_umac_writel(priv,
+			addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
+	bcmgenet_umac_writel(priv,
+			addr[2] << 24 | addr[3] << 16 |
+			addr[4] << 8 | addr[5],
+			UMAC_MDF_ADDR + ((*i + 1) * 4));
+	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+	reg |= (1 << (MAX_MC_COUNT - *mc));
+	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+	*i += 2;
+	(*mc)++;
+}
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct netdev_hw_addr *ha;
+	int i, mc;
+	u32 reg;
+
+	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+	/* Promiscuous mode */
+	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+	if (dev->flags & IFF_PROMISC) {
+		reg |= CMD_PROMISC;
+		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+		return;
+	} else {
+		reg &= ~CMD_PROMISC;
+		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+	}
+
+	/* UniMac doesn't support ALLMULTI */
+	if (dev->flags & IFF_ALLMULTI) {
+		netdev_warn(dev, "ALLMULTI is not supported\n");
+		return;
+	}
+
+	/* update MDF filter */
+	i = 0;
+	mc = 0;
+	/* Broadcast */
+	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+	/* my own address */
+	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+	/* Unicast list */
+	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+		return;
+
+	if (!netdev_uc_empty(dev))
+		netdev_for_each_uc_addr(ha, dev)
+			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+	/* Multicast */
+	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+		return;
+
+	netdev_for_each_mc_addr(ha, dev)
+		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	/* Setting the MAC address at the hardware level is not possible
+	 * without disabling the UniMAC RX/TX enable bits. 
+	 */ +	if (netif_running(dev)) +		return -EBUSY; + +	ether_addr_copy(dev->dev_addr, addr->sa_data); + +	return 0; +} + +static const struct net_device_ops bcmgenet_netdev_ops = { +	.ndo_open		= bcmgenet_open, +	.ndo_stop		= bcmgenet_close, +	.ndo_start_xmit		= bcmgenet_xmit, +	.ndo_tx_timeout		= bcmgenet_timeout, +	.ndo_set_rx_mode	= bcmgenet_set_rx_mode, +	.ndo_set_mac_address	= bcmgenet_set_mac_addr, +	.ndo_do_ioctl		= bcmgenet_ioctl, +	.ndo_set_features	= bcmgenet_set_features, +}; + +/* Array of GENET hardware parameters/characteristics */ +static struct bcmgenet_hw_params bcmgenet_hw_params[] = { +	[GENET_V1] = { +		.tx_queues = 0, +		.rx_queues = 0, +		.bds_cnt = 0, +		.bp_in_en_shift = 16, +		.bp_in_mask = 0xffff, +		.hfb_filter_cnt = 16, +		.qtag_mask = 0x1F, +		.hfb_offset = 0x1000, +		.rdma_offset = 0x2000, +		.tdma_offset = 0x3000, +		.words_per_bd = 2, +	}, +	[GENET_V2] = { +		.tx_queues = 4, +		.rx_queues = 4, +		.bds_cnt = 32, +		.bp_in_en_shift = 16, +		.bp_in_mask = 0xffff, +		.hfb_filter_cnt = 16, +		.qtag_mask = 0x1F, +		.tbuf_offset = 0x0600, +		.hfb_offset = 0x1000, +		.hfb_reg_offset = 0x2000, +		.rdma_offset = 0x3000, +		.tdma_offset = 0x4000, +		.words_per_bd = 2, +		.flags = GENET_HAS_EXT, +	}, +	[GENET_V3] = { +		.tx_queues = 4, +		.rx_queues = 4, +		.bds_cnt = 32, +		.bp_in_en_shift = 17, +		.bp_in_mask = 0x1ffff, +		.hfb_filter_cnt = 48, +		.qtag_mask = 0x3F, +		.tbuf_offset = 0x0600, +		.hfb_offset = 0x8000, +		.hfb_reg_offset = 0xfc00, +		.rdma_offset = 0x10000, +		.tdma_offset = 0x11000, +		.words_per_bd = 2, +		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR, +	}, +	[GENET_V4] = { +		.tx_queues = 4, +		.rx_queues = 4, +		.bds_cnt = 32, +		.bp_in_en_shift = 17, +		.bp_in_mask = 0x1ffff, +		.hfb_filter_cnt = 48, +		.qtag_mask = 0x3F, +		.tbuf_offset = 0x0600, +		.hfb_offset = 0x8000, +		.hfb_reg_offset = 0xfc00, +		.rdma_offset = 0x2000, +		.tdma_offset = 0x4000, +		.words_per_bd = 3, +		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR, +	}, +}; + +/* Infer hardware parameters from the detected GENET version */ +static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) +{ +	struct bcmgenet_hw_params *params; +	u32 reg; +	u8 major; + +	if (GENET_IS_V4(priv)) { +		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; +		genet_dma_ring_regs = genet_dma_ring_regs_v4; +		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; +		priv->version = GENET_V4; +	} else if (GENET_IS_V3(priv)) { +		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; +		genet_dma_ring_regs = genet_dma_ring_regs_v123; +		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; +		priv->version = GENET_V3; +	} else if (GENET_IS_V2(priv)) { +		bcmgenet_dma_regs = bcmgenet_dma_regs_v2; +		genet_dma_ring_regs = genet_dma_ring_regs_v123; +		priv->dma_rx_chk_bit = DMA_RX_CHK_V12; +		priv->version = GENET_V2; +	} else if (GENET_IS_V1(priv)) { +		bcmgenet_dma_regs = bcmgenet_dma_regs_v1; +		genet_dma_ring_regs = genet_dma_ring_regs_v123; +		priv->dma_rx_chk_bit = DMA_RX_CHK_V12; +		priv->version = GENET_V1; +	} + +	/* enum genet_version starts at 1 */ +	priv->hw_params = &bcmgenet_hw_params[priv->version]; +	params = priv->hw_params; + +	/* Read GENET HW version */ +	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); +	major = (reg >> 24 & 0x0f); +	if (major == 5) +		major = 4; +	else if (major == 0) +		major = 1; +	if (major != priv->version) { +		dev_err(&priv->pdev->dev, +			"GENET version mismatch, got: %d, configured for: %d\n", +			major, priv->version); +	} + +	/* Print the GENET core version */ +	
dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, +		major, (reg >> 16) & 0x0f, reg & 0xffff); + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +	if (!(params->flags & GENET_HAS_40BITS)) +		pr_warn("GENET does not support 40-bits PA\n"); +#endif + +	pr_debug("Configuration for version: %d\n" +		"TXq: %1d, RXq: %1d, BDs: %1d\n" +		"BP << en: %2d, BP msk: 0x%05x\n" +		"HFB count: %2d, QTAQ msk: 0x%05x\n" +		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n" +		"RDMA: 0x%05x, TDMA: 0x%05x\n" +		"Words/BD: %d\n", +		priv->version, +		params->tx_queues, params->rx_queues, params->bds_cnt, +		params->bp_in_en_shift, params->bp_in_mask, +		params->hfb_filter_cnt, params->qtag_mask, +		params->tbuf_offset, params->hfb_offset, +		params->hfb_reg_offset, +		params->rdma_offset, params->tdma_offset, +		params->words_per_bd); +} + +static const struct of_device_id bcmgenet_match[] = { +	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 }, +	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 }, +	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 }, +	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, +	{ }, +}; + +static int bcmgenet_probe(struct platform_device *pdev) +{ +	struct device_node *dn = pdev->dev.of_node; +	const struct of_device_id *of_id; +	struct bcmgenet_priv *priv; +	struct net_device *dev; +	const void *macaddr; +	struct resource *r; +	int err = -EIO; + +	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */ +	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1); +	if (!dev) { +		dev_err(&pdev->dev, "can't allocate net device\n"); +		return -ENOMEM; +	} + +	of_id = of_match_node(bcmgenet_match, dn); +	if (!of_id) +		return -EINVAL; + +	priv = netdev_priv(dev); +	priv->irq0 = platform_get_irq(pdev, 0); +	priv->irq1 = platform_get_irq(pdev, 1); +	if (!priv->irq0 || !priv->irq1) { +		dev_err(&pdev->dev, "can't find IRQs\n"); +		err = -EINVAL; +		goto err; +	} + +	macaddr = of_get_mac_address(dn); +	if (!macaddr) { +		dev_err(&pdev->dev, "can't find MAC address\n"); +		err = -EINVAL; +		goto err; +	} + +	r = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	priv->base = devm_ioremap_resource(&pdev->dev, r); +	if (IS_ERR(priv->base)) { +		err = PTR_ERR(priv->base); +		goto err; +	} + +	SET_NETDEV_DEV(dev, &pdev->dev); +	dev_set_drvdata(&pdev->dev, dev); +	ether_addr_copy(dev->dev_addr, macaddr); +	dev->watchdog_timeo = 2 * HZ; +	dev->ethtool_ops = &bcmgenet_ethtool_ops; +	dev->netdev_ops = &bcmgenet_netdev_ops; +	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64); + +	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); + +	/* Set hardware features */ +	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | +		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + +	/* Set the needed headroom to account for any possible +	 * features enabling/disabling at runtime +	 */ +	dev->needed_headroom += 64; + +	netdev_boot_setup_check(dev); + +	priv->dev = dev; +	priv->pdev = pdev; +	priv->version = (enum bcmgenet_version)of_id->data; + +	bcmgenet_set_hw_params(priv); + +	/* Mii wait queue */ +	init_waitqueue_head(&priv->wq); +	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ +	priv->rx_buf_len = RX_BUF_LENGTH; +	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task); + +	priv->clk = devm_clk_get(&priv->pdev->dev, "enet"); +	if (IS_ERR(priv->clk)) +		dev_warn(&priv->pdev->dev, "failed to get enet clock\n"); + +	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol"); +	if (IS_ERR(priv->clk_wol)) +		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n"); + +	
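For reference, a minimal standalone sketch (not part of the patch) of the SYS_REV_CTRL decode performed in bcmgenet_set_hw_params() above: bits 27:24 hold the hardware major version (5 is reported as GENETv4, 0 as GENETv1) and are checked against the version selected from the compatible string; the 0x05000000 sample value is only an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

static unsigned int genet_major_from_rev(uint32_t reg)
{
	unsigned int major = (reg >> 24) & 0x0f;

	/* same remapping as bcmgenet_set_hw_params() */
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	return major;
}

int main(void)
{
	uint32_t reg = 0x05000000;	/* assumed sample SYS_REV_CTRL readback */

	printf("GENET major version: %u\n", genet_major_from_rev(reg));
	return 0;
}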
if (!IS_ERR(priv->clk)) +		clk_prepare_enable(priv->clk); + +	err = reset_umac(priv); +	if (err) +		goto err_clk_disable; + +	err = bcmgenet_mii_init(dev); +	if (err) +		goto err_clk_disable; + +	/* setup number of real queues  + 1 (GENET_V1 has 0 hardware queues +	 * just the ring 16 descriptor based TX +	 */ +	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); +	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); + +	/* libphy will determine the link state */ +	netif_carrier_off(dev); + +	/* Turn off the main clock, WOL clock is handled separately */ +	if (!IS_ERR(priv->clk)) +		clk_disable_unprepare(priv->clk); + +	err = register_netdev(dev); +	if (err) +		goto err; + +	return err; + +err_clk_disable: +	if (!IS_ERR(priv->clk)) +		clk_disable_unprepare(priv->clk); +err: +	free_netdev(dev); +	return err; +} + +static int bcmgenet_remove(struct platform_device *pdev) +{ +	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev); + +	dev_set_drvdata(&pdev->dev, NULL); +	unregister_netdev(priv->dev); +	bcmgenet_mii_exit(priv->dev); +	free_netdev(priv->dev); + +	return 0; +} + + +static struct platform_driver bcmgenet_driver = { +	.probe	= bcmgenet_probe, +	.remove	= bcmgenet_remove, +	.driver	= { +		.name	= "bcmgenet", +		.owner	= THIS_MODULE, +		.of_match_table = bcmgenet_match, +	}, +}; +module_platform_driver(bcmgenet_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver"); +MODULE_ALIAS("platform:bcmgenet"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h new file mode 100644 index 00000000000..e23c993b136 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -0,0 +1,628 @@ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * +*/ +#ifndef __BCMGENET_H__ +#define __BCMGENET_H__ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <linux/clk.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/phy.h> + +/* total number of Buffer Descriptors, same for Rx/Tx */ +#define TOTAL_DESC				256 + +/* which ring is descriptor based */ +#define DESC_INDEX				16 + +/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528. + * 1536 is multiple of 256 bytes + */ +#define ENET_BRCM_TAG_LEN	6 +#define ENET_PAD		8 +#define ENET_MAX_MTU_SIZE	(ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \ +				 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD) +#define DMA_MAX_BURST_LENGTH    0x10 + +/* misc. 
configuration */ +#define CLEAR_ALL_HFB			0xFF +#define DMA_FC_THRESH_HI		(TOTAL_DESC >> 4) +#define DMA_FC_THRESH_LO		5 + +/* 64B receive/transmit status block */ +struct status_64 { +	u32	length_status;		/* length and peripheral status */ +	u32	ext_status;		/* Extended status*/ +	u32	rx_csum;		/* partial rx checksum */ +	u32	unused1[9];		/* unused */ +	u32	tx_csum_info;		/* Tx checksum info. */ +	u32	unused2[3];		/* unused */ +}; + +/* Rx status bits */ +#define STATUS_RX_EXT_MASK		0x1FFFFF +#define STATUS_RX_CSUM_MASK		0xFFFF +#define STATUS_RX_CSUM_OK		0x10000 +#define STATUS_RX_CSUM_FR		0x20000 +#define STATUS_RX_PROTO_TCP		0 +#define STATUS_RX_PROTO_UDP		1 +#define STATUS_RX_PROTO_ICMP		2 +#define STATUS_RX_PROTO_OTHER		3 +#define STATUS_RX_PROTO_MASK		3 +#define STATUS_RX_PROTO_SHIFT		18 +#define STATUS_FILTER_INDEX_MASK	0xFFFF +/* Tx status bits */ +#define STATUS_TX_CSUM_START_MASK	0X7FFF +#define STATUS_TX_CSUM_START_SHIFT	16 +#define STATUS_TX_CSUM_PROTO_UDP	0x8000 +#define STATUS_TX_CSUM_OFFSET_MASK	0x7FFF +#define STATUS_TX_CSUM_LV		0x80000000 + +/* DMA Descriptor */ +#define DMA_DESC_LENGTH_STATUS	0x00	/* in bytes of data in buffer */ +#define DMA_DESC_ADDRESS_LO	0x04	/* lower bits of PA */ +#define DMA_DESC_ADDRESS_HI	0x08	/* upper 32 bits of PA, GENETv4+ */ + +/* Rx/Tx common counter group */ +struct bcmgenet_pkt_counters { +	u32	cnt_64;		/* RO Received/Transmited 64 bytes packet */ +	u32	cnt_127;	/* RO Rx/Tx 127 bytes packet */ +	u32	cnt_255;	/* RO Rx/Tx 65-255 bytes packet */ +	u32	cnt_511;	/* RO Rx/Tx 256-511 bytes packet */ +	u32	cnt_1023;	/* RO Rx/Tx 512-1023 bytes packet */ +	u32	cnt_1518;	/* RO Rx/Tx 1024-1518 bytes packet */ +	u32	cnt_mgv;	/* RO Rx/Tx 1519-1522 good VLAN packet */ +	u32	cnt_2047;	/* RO Rx/Tx 1522-2047 bytes packet*/ +	u32	cnt_4095;	/* RO Rx/Tx 2048-4095 bytes packet*/ +	u32	cnt_9216;	/* RO Rx/Tx 4096-9216 bytes packet*/ +}; + +/* RSV, Receive Status Vector */ +struct bcmgenet_rx_counters { +	struct  bcmgenet_pkt_counters pkt_cnt; +	u32	pkt;		/* RO (0x428) Received pkt count*/ +	u32	bytes;		/* RO Received byte count */ +	u32	mca;		/* RO # of Received multicast pkt */ +	u32	bca;		/* RO # of Receive broadcast pkt */ +	u32	fcs;		/* RO # of Received FCS error  */ +	u32	cf;		/* RO # of Received control frame pkt*/ +	u32	pf;		/* RO # of Received pause frame pkt */ +	u32	uo;		/* RO # of unknown op code pkt */ +	u32	aln;		/* RO # of alignment error count */ +	u32	flr;		/* RO # of frame length out of range count */ +	u32	cde;		/* RO # of code error pkt */ +	u32	fcr;		/* RO # of carrier sense error pkt */ +	u32	ovr;		/* RO # of oversize pkt*/ +	u32	jbr;		/* RO # of jabber count */ +	u32	mtue;		/* RO # of MTU error pkt*/ +	u32	pok;		/* RO # of Received good pkt */ +	u32	uc;		/* RO # of unicast pkt */ +	u32	ppp;		/* RO # of PPP pkt */ +	u32	rcrc;		/* RO (0x470),# of CRC match pkt */ +}; + +/* TSV, Transmit Status Vector */ +struct bcmgenet_tx_counters { +	struct bcmgenet_pkt_counters pkt_cnt; +	u32	pkts;		/* RO (0x4a8) Transmited pkt */ +	u32	mca;		/* RO # of xmited multicast pkt */ +	u32	bca;		/* RO # of xmited broadcast pkt */ +	u32	pf;		/* RO # of xmited pause frame count */ +	u32	cf;		/* RO # of xmited control frame count */ +	u32	fcs;		/* RO # of xmited FCS error count */ +	u32	ovr;		/* RO # of xmited oversize pkt */ +	u32	drf;		/* RO # of xmited deferral pkt */ +	u32	edf;		/* RO # of xmited Excessive deferral pkt*/ +	u32	scl;		/* RO # of xmited single collision pkt */ +	u32	mcl;		/* RO # of xmited multiple collision pkt*/ +	u32	lcl;		/* RO # of xmited late 
collision pkt */ +	u32	ecl;		/* RO # of xmited excessive collision pkt*/ +	u32	frg;		/* RO # of xmited fragments pkt*/ +	u32	ncl;		/* RO # of xmited total collision count */ +	u32	jbr;		/* RO # of xmited jabber count*/ +	u32	bytes;		/* RO # of xmited byte count */ +	u32	pok;		/* RO # of xmited good pkt */ +	u32	uc;		/* RO (0x0x4f0)# of xmited unitcast pkt */ +}; + +struct bcmgenet_mib_counters { +	struct bcmgenet_rx_counters rx; +	struct bcmgenet_tx_counters tx; +	u32	rx_runt_cnt; +	u32	rx_runt_fcs; +	u32	rx_runt_fcs_align; +	u32	rx_runt_bytes; +	u32	rbuf_ovflow_cnt; +	u32	rbuf_err_cnt; +	u32	mdf_err_cnt; +}; + +#define UMAC_HD_BKP_CTRL		0x004 +#define	 HD_FC_EN			(1 << 0) +#define  HD_FC_BKOFF_OK			(1 << 1) +#define  IPG_CONFIG_RX_SHIFT		2 +#define  IPG_CONFIG_RX_MASK		0x1F + +#define UMAC_CMD			0x008 +#define  CMD_TX_EN			(1 << 0) +#define  CMD_RX_EN			(1 << 1) +#define  UMAC_SPEED_10			0 +#define  UMAC_SPEED_100			1 +#define  UMAC_SPEED_1000		2 +#define  UMAC_SPEED_2500		3 +#define  CMD_SPEED_SHIFT		2 +#define  CMD_SPEED_MASK			3 +#define  CMD_PROMISC			(1 << 4) +#define  CMD_PAD_EN			(1 << 5) +#define  CMD_CRC_FWD			(1 << 6) +#define  CMD_PAUSE_FWD			(1 << 7) +#define  CMD_RX_PAUSE_IGNORE		(1 << 8) +#define  CMD_TX_ADDR_INS		(1 << 9) +#define  CMD_HD_EN			(1 << 10) +#define  CMD_SW_RESET			(1 << 13) +#define  CMD_LCL_LOOP_EN		(1 << 15) +#define  CMD_AUTO_CONFIG		(1 << 22) +#define  CMD_CNTL_FRM_EN		(1 << 23) +#define  CMD_NO_LEN_CHK			(1 << 24) +#define  CMD_RMT_LOOP_EN		(1 << 25) +#define  CMD_PRBL_EN			(1 << 27) +#define  CMD_TX_PAUSE_IGNORE		(1 << 28) +#define  CMD_TX_RX_EN			(1 << 29) +#define  CMD_RUNT_FILTER_DIS		(1 << 30) + +#define UMAC_MAC0			0x00C +#define UMAC_MAC1			0x010 +#define UMAC_MAX_FRAME_LEN		0x014 + +#define UMAC_TX_FLUSH			0x334 + +#define UMAC_MIB_START			0x400 + +#define UMAC_MDIO_CMD			0x614 +#define  MDIO_START_BUSY		(1 << 29) +#define  MDIO_READ_FAIL			(1 << 28) +#define  MDIO_RD			(2 << 26) +#define  MDIO_WR			(1 << 26) +#define  MDIO_PMD_SHIFT			21 +#define  MDIO_PMD_MASK			0x1F +#define  MDIO_REG_SHIFT			16 +#define  MDIO_REG_MASK			0x1F + +#define UMAC_RBUF_OVFL_CNT		0x61C + +#define UMAC_MPD_CTRL			0x620 +#define  MPD_EN				(1 << 0) +#define  MPD_PW_EN			(1 << 27) +#define  MPD_MSEQ_LEN_SHIFT		16 +#define  MPD_MSEQ_LEN_MASK		0xFF + +#define UMAC_MPD_PW_MS			0x624 +#define UMAC_MPD_PW_LS			0x628 +#define UMAC_RBUF_ERR_CNT		0x634 +#define UMAC_MDF_ERR_CNT		0x638 +#define UMAC_MDF_CTRL			0x650 +#define UMAC_MDF_ADDR			0x654 +#define UMAC_MIB_CTRL			0x580 +#define  MIB_RESET_RX			(1 << 0) +#define  MIB_RESET_RUNT			(1 << 1) +#define  MIB_RESET_TX			(1 << 2) + +#define RBUF_CTRL			0x00 +#define  RBUF_64B_EN			(1 << 0) +#define  RBUF_ALIGN_2B			(1 << 1) +#define  RBUF_BAD_DIS			(1 << 2) + +#define RBUF_STATUS			0x0C +#define  RBUF_STATUS_WOL		(1 << 0) +#define  RBUF_STATUS_MPD_INTR_ACTIVE	(1 << 1) +#define  RBUF_STATUS_ACPI_INTR_ACTIVE	(1 << 2) + +#define RBUF_CHK_CTRL			0x14 +#define  RBUF_RXCHK_EN			(1 << 0) +#define  RBUF_SKIP_FCS			(1 << 4) + +#define RBUF_TBUF_SIZE_CTRL		0xb4 + +#define RBUF_HFB_CTRL_V1		0x38 +#define  RBUF_HFB_FILTER_EN_SHIFT	16 +#define  RBUF_HFB_FILTER_EN_MASK	0xffff0000 +#define  RBUF_HFB_EN			(1 << 0) +#define  RBUF_HFB_256B			(1 << 1) +#define  RBUF_ACPI_EN			(1 << 2) + +#define RBUF_HFB_LEN_V1			0x3C +#define  RBUF_FLTR_LEN_MASK		0xFF +#define  RBUF_FLTR_LEN_SHIFT		8 + +#define TBUF_CTRL			0x00 +#define TBUF_BP_MC			0x0C + +#define TBUF_CTRL_V1			0x80 +#define TBUF_BP_MC_V1			0xA0 + +#define HFB_CTRL			0x00 +#define 
HFB_FLT_ENABLE_V3PLUS		0x04 +#define HFB_FLT_LEN_V2			0x04 +#define HFB_FLT_LEN_V3PLUS		0x1C + +/* uniMac intrl2 registers */ +#define INTRL2_CPU_STAT			0x00 +#define INTRL2_CPU_SET			0x04 +#define INTRL2_CPU_CLEAR		0x08 +#define INTRL2_CPU_MASK_STATUS		0x0C +#define INTRL2_CPU_MASK_SET		0x10 +#define INTRL2_CPU_MASK_CLEAR		0x14 + +/* INTRL2 instance 0 definitions */ +#define UMAC_IRQ_SCB			(1 << 0) +#define UMAC_IRQ_EPHY			(1 << 1) +#define UMAC_IRQ_PHY_DET_R		(1 << 2) +#define UMAC_IRQ_PHY_DET_F		(1 << 3) +#define UMAC_IRQ_LINK_UP		(1 << 4) +#define UMAC_IRQ_LINK_DOWN		(1 << 5) +#define UMAC_IRQ_UMAC			(1 << 6) +#define UMAC_IRQ_UMAC_TSV		(1 << 7) +#define UMAC_IRQ_TBUF_UNDERRUN		(1 << 8) +#define UMAC_IRQ_RBUF_OVERFLOW		(1 << 9) +#define UMAC_IRQ_HFB_SM			(1 << 10) +#define UMAC_IRQ_HFB_MM			(1 << 11) +#define UMAC_IRQ_MPD_R			(1 << 12) +#define UMAC_IRQ_RXDMA_MBDONE		(1 << 13) +#define UMAC_IRQ_RXDMA_PDONE		(1 << 14) +#define UMAC_IRQ_RXDMA_BDONE		(1 << 15) +#define UMAC_IRQ_TXDMA_MBDONE		(1 << 16) +#define UMAC_IRQ_TXDMA_PDONE		(1 << 17) +#define UMAC_IRQ_TXDMA_BDONE		(1 << 18) +/* Only valid for GENETv3+ */ +#define UMAC_IRQ_MDIO_DONE		(1 << 23) +#define UMAC_IRQ_MDIO_ERROR		(1 << 24) + +/* Register block offsets */ +#define GENET_SYS_OFF			0x0000 +#define GENET_GR_BRIDGE_OFF		0x0040 +#define GENET_EXT_OFF			0x0080 +#define GENET_INTRL2_0_OFF		0x0200 +#define GENET_INTRL2_1_OFF		0x0240 +#define GENET_RBUF_OFF			0x0300 +#define GENET_UMAC_OFF			0x0800 + +/* SYS block offsets and register definitions */ +#define SYS_REV_CTRL			0x00 +#define SYS_PORT_CTRL			0x04 +#define  PORT_MODE_INT_EPHY		0 +#define  PORT_MODE_INT_GPHY		1 +#define  PORT_MODE_EXT_EPHY		2 +#define  PORT_MODE_EXT_GPHY		3 +#define  PORT_MODE_EXT_RVMII_25		(4 | BIT(4)) +#define  PORT_MODE_EXT_RVMII_50		4 +#define  LED_ACT_SOURCE_MAC		(1 << 9) + +#define SYS_RBUF_FLUSH_CTRL		0x08 +#define SYS_TBUF_FLUSH_CTRL		0x0C +#define RBUF_FLUSH_CTRL_V1		0x04 + +/* Ext block register offsets and definitions */ +#define EXT_EXT_PWR_MGMT		0x00 +#define  EXT_PWR_DOWN_BIAS		(1 << 0) +#define  EXT_PWR_DOWN_DLL		(1 << 1) +#define  EXT_PWR_DOWN_PHY		(1 << 2) +#define  EXT_PWR_DN_EN_LD		(1 << 3) +#define  EXT_ENERGY_DET			(1 << 4) +#define  EXT_IDDQ_FROM_PHY		(1 << 5) +#define  EXT_PHY_RESET			(1 << 8) +#define  EXT_ENERGY_DET_MASK		(1 << 12) + +#define EXT_RGMII_OOB_CTRL		0x0C +#define  RGMII_LINK			(1 << 4) +#define  OOB_DISABLE			(1 << 5) +#define  RGMII_MODE_EN			(1 << 6) +#define  ID_MODE_DIS			(1 << 16) + +#define EXT_GPHY_CTRL			0x1C +#define  EXT_CFG_IDDQ_BIAS		(1 << 0) +#define  EXT_CFG_PWR_DOWN		(1 << 1) +#define  EXT_GPHY_RESET			(1 << 5) + +/* DMA rings size */ +#define DMA_RING_SIZE			(0x40) +#define DMA_RINGS_SIZE			(DMA_RING_SIZE * (DESC_INDEX + 1)) + +/* DMA registers common definitions */ +#define DMA_RW_POINTER_MASK		0x1FF +#define DMA_P_INDEX_DISCARD_CNT_MASK	0xFFFF +#define DMA_P_INDEX_DISCARD_CNT_SHIFT	16 +#define DMA_BUFFER_DONE_CNT_MASK	0xFFFF +#define DMA_BUFFER_DONE_CNT_SHIFT	16 +#define DMA_P_INDEX_MASK		0xFFFF +#define DMA_C_INDEX_MASK		0xFFFF + +/* DMA ring size register */ +#define DMA_RING_SIZE_MASK		0xFFFF +#define DMA_RING_SIZE_SHIFT		16 +#define DMA_RING_BUFFER_SIZE_MASK	0xFFFF + +/* DMA interrupt threshold register */ +#define DMA_INTR_THRESHOLD_MASK		0x00FF + +/* DMA XON/XOFF register */ +#define DMA_XON_THREHOLD_MASK		0xFFFF +#define DMA_XOFF_THRESHOLD_MASK		0xFFFF +#define DMA_XOFF_THRESHOLD_SHIFT	16 + +/* DMA flow period register */ +#define DMA_FLOW_PERIOD_MASK		0xFFFF +#define DMA_MAX_PKT_SIZE_MASK		
0xFFFF +#define DMA_MAX_PKT_SIZE_SHIFT		16 + + +/* DMA control register */ +#define DMA_EN				(1 << 0) +#define DMA_RING_BUF_EN_SHIFT		0x01 +#define DMA_RING_BUF_EN_MASK		0xFFFF +#define DMA_TSB_SWAP_EN			(1 << 20) + +/* DMA status register */ +#define DMA_DISABLED			(1 << 0) +#define DMA_DESC_RAM_INIT_BUSY		(1 << 1) + +/* DMA SCB burst size register */ +#define DMA_SCB_BURST_SIZE_MASK		0x1F + +/* DMA activity vector register */ +#define DMA_ACTIVITY_VECTOR_MASK	0x1FFFF + +/* DMA backpressure mask register */ +#define DMA_BACKPRESSURE_MASK		0x1FFFF +#define DMA_PFC_ENABLE			(1 << 31) + +/* DMA backpressure status register */ +#define DMA_BACKPRESSURE_STATUS_MASK	0x1FFFF + +/* DMA override register */ +#define DMA_LITTLE_ENDIAN_MODE		(1 << 0) +#define DMA_REGISTER_MODE		(1 << 1) + +/* DMA timeout register */ +#define DMA_TIMEOUT_MASK		0xFFFF +#define DMA_TIMEOUT_VAL			5000	/* micro seconds */ + +/* TDMA rate limiting control register */ +#define DMA_RATE_LIMIT_EN_MASK		0xFFFF + +/* TDMA arbitration control register */ +#define DMA_ARBITER_MODE_MASK		0x03 +#define DMA_RING_BUF_PRIORITY_MASK	0x1F +#define DMA_RING_BUF_PRIORITY_SHIFT	5 +#define DMA_RATE_ADJ_MASK		0xFF + +/* Tx/Rx Dma Descriptor common bits*/ +#define DMA_BUFLENGTH_MASK		0x0fff +#define DMA_BUFLENGTH_SHIFT		16 +#define DMA_OWN				0x8000 +#define DMA_EOP				0x4000 +#define DMA_SOP				0x2000 +#define DMA_WRAP			0x1000 +/* Tx specific Dma descriptor bits */ +#define DMA_TX_UNDERRUN			0x0200 +#define DMA_TX_APPEND_CRC		0x0040 +#define DMA_TX_OW_CRC			0x0020 +#define DMA_TX_DO_CSUM			0x0010 +#define DMA_TX_QTAG_SHIFT		7 + +/* Rx Specific Dma descriptor bits */ +#define DMA_RX_CHK_V3PLUS		0x8000 +#define DMA_RX_CHK_V12			0x1000 +#define DMA_RX_BRDCAST			0x0040 +#define DMA_RX_MULT			0x0020 +#define DMA_RX_LG			0x0010 +#define DMA_RX_NO			0x0008 +#define DMA_RX_RXER			0x0004 +#define DMA_RX_CRC_ERROR		0x0002 +#define DMA_RX_OV			0x0001 +#define DMA_RX_FI_MASK			0x001F +#define DMA_RX_FI_SHIFT			0x0007 +#define DMA_DESC_ALLOC_MASK		0x00FF + +#define DMA_ARBITER_RR			0x00 +#define DMA_ARBITER_WRR			0x01 +#define DMA_ARBITER_SP			0x02 + +struct enet_cb { +	struct sk_buff      *skb; +	void __iomem *bd_addr; +	DEFINE_DMA_UNMAP_ADDR(dma_addr); +	DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +/* power management mode */ +enum bcmgenet_power_mode { +	GENET_POWER_CABLE_SENSE = 0, +	GENET_POWER_PASSIVE, +}; + +struct bcmgenet_priv; + +/* We support both runtime GENET detection and compile-time + * to optimize code-paths for a given hardware + */ +enum bcmgenet_version { +	GENET_V1 = 1, +	GENET_V2, +	GENET_V3, +	GENET_V4 +}; + +#define GENET_IS_V1(p)	((p)->version == GENET_V1) +#define GENET_IS_V2(p)	((p)->version == GENET_V2) +#define GENET_IS_V3(p)	((p)->version == GENET_V3) +#define GENET_IS_V4(p)	((p)->version == GENET_V4) + +/* Hardware flags */ +#define GENET_HAS_40BITS	(1 << 0) +#define GENET_HAS_EXT		(1 << 1) +#define GENET_HAS_MDIO_INTR	(1 << 2) + +/* BCMGENET hardware parameters, keep this structure nicely aligned + * since it is going to be used in hot paths + */ +struct bcmgenet_hw_params { +	u8		tx_queues; +	u8		rx_queues; +	u8		bds_cnt; +	u8		bp_in_en_shift; +	u32		bp_in_mask; +	u8		hfb_filter_cnt; +	u8		qtag_mask; +	u16		tbuf_offset; +	u32		hfb_offset; +	u32		hfb_reg_offset; +	u32		rdma_offset; +	u32		tdma_offset; +	u32		words_per_bd; +	u32		flags; +}; + +struct bcmgenet_tx_ring { +	spinlock_t	lock;		/* ring lock */ +	unsigned int	index;		/* ring index */ +	unsigned int	queue;		/* queue index */ +	struct enet_cb	*cbs;		/* tx ring buffer 
control block*/ +	unsigned int	size;		/* size of each tx ring */ +	unsigned int	c_index;	/* last consumer index of each ring*/ +	unsigned int	free_bds;	/* # of free bds for each ring */ +	unsigned int	write_ptr;	/* Tx ring write pointer SW copy */ +	unsigned int	prod_index;	/* Tx ring producer index SW copy */ +	unsigned int	cb_ptr;		/* Tx ring initial CB ptr */ +	unsigned int	end_ptr;	/* Tx ring end CB ptr */ +	void (*int_enable)(struct bcmgenet_priv *priv, +				struct bcmgenet_tx_ring *); +	void (*int_disable)(struct bcmgenet_priv *priv, +				struct bcmgenet_tx_ring *); +}; + +/* device context */ +struct bcmgenet_priv { +	void __iomem *base; +	enum bcmgenet_version version; +	struct net_device *dev; +	u32 int0_mask; +	u32 int1_mask; + +	/* NAPI for descriptor based rx */ +	struct napi_struct napi ____cacheline_aligned; + +	/* transmit variables */ +	void __iomem *tx_bds; +	struct enet_cb *tx_cbs; +	unsigned int num_tx_bds; + +	struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + +	/* receive variables */ +	void __iomem *rx_bds; +	void __iomem *rx_bd_assign_ptr; +	int rx_bd_assign_index; +	struct enet_cb *rx_cbs; +	unsigned int num_rx_bds; +	unsigned int rx_buf_len; +	unsigned int rx_read_ptr; +	unsigned int rx_c_index; + +	/* other misc variables */ +	struct bcmgenet_hw_params *hw_params; + +	/* MDIO bus variables */ +	wait_queue_head_t wq; +	struct phy_device *phydev; +	struct device_node *phy_dn; +	struct mii_bus *mii_bus; + +	/* PHY device variables */ +	int old_duplex; +	int old_link; +	int old_pause; +	phy_interface_t phy_interface; +	int phy_addr; +	int ext_phy; + +	/* Interrupt variables */ +	struct work_struct bcmgenet_irq_work; +	int irq0; +	int irq1; +	unsigned int irq0_stat; +	unsigned int irq1_stat; + +	/* HW descriptors/checksum variables */ +	bool desc_64b_en; +	bool desc_rxchk_en; +	bool crc_fwd_en; + +	unsigned int dma_rx_chk_bit; + +	u32 msg_enable; + +	struct clk *clk; +	struct platform_device *pdev; + +	/* WOL */ +	unsigned long wol_enabled; +	struct clk *clk_wol; +	u32 wolopts; + +	struct bcmgenet_mib_counters mib; +}; + +#define GENET_IO_MACRO(name, offset)					\ +static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv,	\ +					u32 off)			\ +{									\ +	return __raw_readl(priv->base + offset + off);			\ +}									\ +static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv,	\ +					u32 val, u32 off)		\ +{									\ +	__raw_writel(val, priv->base + offset + off);			\ +} + +GENET_IO_MACRO(ext, GENET_EXT_OFF); +GENET_IO_MACRO(umac, GENET_UMAC_OFF); +GENET_IO_MACRO(sys, GENET_SYS_OFF); + +/* interrupt l2 registers accessors */ +GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF); +GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF); + +/* HFB register accessors  */ +GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset); + +/* GENET v2+ HFB control and filter len helpers */ +GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset); + +/* RBUF register accessors */ +GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); + +/* MDIO routines */ +int bcmgenet_mii_init(struct net_device *dev); +int bcmgenet_mii_config(struct net_device *dev); +void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_mii_reset(struct net_device *dev); + +#endif /* __BCMGENET_H__ */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c new file mode 100644 index 00000000000..add8d859608 --- /dev/null +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -0,0 +1,469 @@ +/* + * Broadcom GENET MDIO routines + * + * Copyright (c) 2014 Broadcom 
Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/bitops.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/brcmphy.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> + +#include "bcmgenet.h" + +/* read a value from the MII */ +static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) +{ +	int ret; +	struct net_device *dev = bus->priv; +	struct bcmgenet_priv *priv = netdev_priv(dev); +	u32 reg; + +	bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | +			(location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); +	/* Start MDIO transaction*/ +	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); +	reg |= MDIO_START_BUSY; +	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); +	wait_event_timeout(priv->wq, +			!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) +				& MDIO_START_BUSY), +			HZ / 100); +	ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); + +	if (ret & MDIO_READ_FAIL) +		return -EIO; + +	return ret & 0xffff; +} + +/* write a value to the MII */ +static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, +			int location, u16 val) +{ +	struct net_device *dev = bus->priv; +	struct bcmgenet_priv *priv = netdev_priv(dev); +	u32 reg; + +	bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | +			(location << MDIO_REG_SHIFT) | (0xffff & val)), +			UMAC_MDIO_CMD); +	reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); +	reg |= MDIO_START_BUSY; +	bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); +	wait_event_timeout(priv->wq, +			!(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & +				MDIO_START_BUSY), +			HZ / 100); + +	return 0; +} + +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +static void bcmgenet_mii_setup(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct phy_device *phydev = priv->phydev; +	u32 reg, cmd_bits = 0; +	unsigned int status_changed = 0; + +	if (priv->old_link != phydev->link) { +		status_changed = 1; +		priv->old_link = phydev->link; +	} + +	if (phydev->link) { +		/* program UMAC and RGMII block based on established link +		 * speed, pause, and duplex. +		 * the speed set in umac->cmd tell RGMII block which clock +		 * 25MHz(100Mbps)/125MHz(1Gbps) to use for transmit. +		 * receive clock is provided by PHY. 
+		 */ +		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); +		reg &= ~OOB_DISABLE; +		reg |= RGMII_LINK; +		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + +		/* speed */ +		if (phydev->speed == SPEED_1000) +			cmd_bits = UMAC_SPEED_1000; +		else if (phydev->speed == SPEED_100) +			cmd_bits = UMAC_SPEED_100; +		else +			cmd_bits = UMAC_SPEED_10; +		cmd_bits <<= CMD_SPEED_SHIFT; + +		if (priv->old_duplex != phydev->duplex) { +			status_changed = 1; +			priv->old_duplex = phydev->duplex; +		} + +		/* duplex */ +		if (phydev->duplex != DUPLEX_FULL) +			cmd_bits |= CMD_HD_EN; + +		if (priv->old_pause != phydev->pause) { +			status_changed = 1; +			priv->old_pause = phydev->pause; +		} + +		/* pause capability */ +		if (!phydev->pause) +			cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + +		reg = bcmgenet_umac_readl(priv, UMAC_CMD); +		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | +			       CMD_HD_EN | +			       CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); +		reg |= cmd_bits; +		bcmgenet_umac_writel(priv, reg, UMAC_CMD); +	} + +	if (status_changed) +		phy_print_status(phydev); +} + +void bcmgenet_mii_reset(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); + +	if (priv->phydev) { +		phy_init_hw(priv->phydev); +		phy_start_aneg(priv->phydev); +	} +} + +static void bcmgenet_ephy_power_up(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	u32 reg = 0; + +	/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ +	if (!GENET_IS_V4(priv)) +		return; + +	reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); +	reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN); +	reg |= EXT_GPHY_RESET; +	bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); +	mdelay(2); + +	reg &= ~EXT_GPHY_RESET; +	bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL); +	udelay(20); +} + +static void bcmgenet_internal_phy_setup(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	u32 reg; + +	/* Power up EPHY */ +	bcmgenet_ephy_power_up(dev); +	/* enable APD */ +	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); +	reg |= EXT_PWR_DN_EN_LD; +	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); +	bcmgenet_mii_reset(dev); +} + +static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) +{ +	u32 reg; + +	/* Speed settings are set in bcmgenet_mii_setup() */ +	reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL); +	reg |= LED_ACT_SOURCE_MAC; +	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); +} + +int bcmgenet_mii_config(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct phy_device *phydev = priv->phydev; +	struct device *kdev = &priv->pdev->dev; +	const char *phy_name = NULL; +	u32 id_mode_dis = 0; +	u32 port_ctrl; +	u32 reg; + +	priv->ext_phy = !phy_is_internal(priv->phydev) && +			(priv->phy_interface != PHY_INTERFACE_MODE_MOCA); + +	if (phy_is_internal(priv->phydev)) +		priv->phy_interface = PHY_INTERFACE_MODE_NA; + +	switch (priv->phy_interface) { +	case PHY_INTERFACE_MODE_NA: +	case PHY_INTERFACE_MODE_MOCA: +		/* Irrespective of the actually configured PHY speed (100 or +		 * 1000) GENETv4 only has an internal GPHY so we will just end +		 * up masking the Gigabit features from what we support, not +		 * switching to the EPHY +		 */ +		if (GENET_IS_V4(priv)) +			port_ctrl = PORT_MODE_INT_GPHY; +		else +			port_ctrl = PORT_MODE_INT_EPHY; + +		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); + +		if (phy_is_internal(priv->phydev)) { +			phy_name = "internal PHY"; +			bcmgenet_internal_phy_setup(dev); +		} else if (priv->phy_interface == 
PHY_INTERFACE_MODE_MOCA) { +			phy_name = "MoCA"; +			bcmgenet_moca_phy_setup(priv); +		} +		break; + +	case PHY_INTERFACE_MODE_MII: +		phy_name = "external MII"; +		phydev->supported &= PHY_BASIC_FEATURES; +		bcmgenet_sys_writel(priv, +				PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); +		break; + +	case PHY_INTERFACE_MODE_REVMII: +		phy_name = "external RvMII"; +		/* of_mdiobus_register took care of reading the 'max-speed' +		 * PHY property for us, effectively limiting the PHY supported +		 * capabilities, use that knowledge to also configure the +		 * Reverse MII interface correctly. +		 */ +		if ((priv->phydev->supported & PHY_BASIC_FEATURES) == +				PHY_BASIC_FEATURES) +			port_ctrl = PORT_MODE_EXT_RVMII_25; +		else +			port_ctrl = PORT_MODE_EXT_RVMII_50; +		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); +		break; + +	case PHY_INTERFACE_MODE_RGMII: +		/* RGMII_NO_ID: TXC transitions at the same time as TXD +		 *		(requires PCB or receiver-side delay) +		 * RGMII:	Add 2ns delay on TXC (90 degree shift) +		 * +		 * ID is implicitly disabled for 100Mbps (RG)MII operation. +		 */ +		id_mode_dis = BIT(16); +		/* fall through */ +	case PHY_INTERFACE_MODE_RGMII_TXID: +		if (id_mode_dis) +			phy_name = "external RGMII (no delay)"; +		else +			phy_name = "external RGMII (TX delay)"; +		reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); +		reg |= RGMII_MODE_EN | id_mode_dis; +		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); +		bcmgenet_sys_writel(priv, +				PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); +		break; +	default: +		dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface); +		return -EINVAL; +	} + +	dev_info(kdev, "configuring instance for %s\n", phy_name); + +	return 0; +} + +static int bcmgenet_mii_probe(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	struct device_node *dn = priv->pdev->dev.of_node; +	struct phy_device *phydev; +	unsigned int phy_flags; +	int ret; + +	if (priv->phydev) { +		pr_info("PHY already attached\n"); +		return 0; +	} + +	/* In the case of a fixed PHY, the DT node associated +	 * to the PHY is the Ethernet MAC DT node. +	 */ +	if (of_phy_is_fixed_link(dn)) { +		ret = of_phy_register_fixed_link(dn); +		if (ret) +			return ret; + +		priv->phy_dn = dn; +	} + +	phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0, +				priv->phy_interface); +	if (!phydev) { +		pr_err("could not attach to PHY\n"); +		return -ENODEV; +	} + +	priv->old_link = -1; +	priv->old_duplex = -1; +	priv->old_pause = -1; +	priv->phydev = phydev; + +	/* Configure port multiplexer based on what the probed PHY device since +	 * reading the 'max-speed' property determines the maximum supported +	 * PHY speed which is needed for bcmgenet_mii_config() to configure +	 * things appropriately. 
+	 */ +	ret = bcmgenet_mii_config(dev); +	if (ret) { +		phy_disconnect(priv->phydev); +		return ret; +	} + +	phy_flags = PHY_BRCM_100MBPS_WAR; + +	/* workarounds are only needed for 100Mpbs PHYs, and +	 * never on GENET V1 hardware +	 */ +	if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv)) +		phy_flags = 0; + +	phydev->dev_flags |= phy_flags; +	phydev->advertising = phydev->supported; + +	/* The internal PHY has its link interrupts routed to the +	 * Ethernet MAC ISRs +	 */ +	if (phy_is_internal(priv->phydev)) +		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT; +	else +		priv->mii_bus->irq[phydev->addr] = PHY_POLL; + +	pr_info("attached PHY at address %d [%s]\n", +			phydev->addr, phydev->drv->name); + +	return 0; +} + +static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) +{ +	struct mii_bus *bus; + +	if (priv->mii_bus) +		return 0; + +	priv->mii_bus = mdiobus_alloc(); +	if (!priv->mii_bus) { +		pr_err("failed to allocate\n"); +		return -ENOMEM; +	} + +	bus = priv->mii_bus; +	bus->priv = priv->dev; +	bus->name = "bcmgenet MII bus"; +	bus->parent = &priv->pdev->dev; +	bus->read = bcmgenet_mii_read; +	bus->write = bcmgenet_mii_write; +	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", +			priv->pdev->name, priv->pdev->id); + +	bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); +	if (!bus->irq) { +		mdiobus_free(priv->mii_bus); +		return -ENOMEM; +	} + +	return 0; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ +	struct device_node *dn = priv->pdev->dev.of_node; +	struct device *kdev = &priv->pdev->dev; +	struct device_node *mdio_dn; +	char *compat; +	int ret; + +	compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); +	if (!compat) +		return -ENOMEM; + +	mdio_dn = of_find_compatible_node(dn, NULL, compat); +	kfree(compat); +	if (!mdio_dn) { +		dev_err(kdev, "unable to find MDIO bus node\n"); +		return -ENODEV; +	} + +	ret = of_mdiobus_register(priv->mii_bus, mdio_dn); +	if (ret) { +		dev_err(kdev, "failed to register MDIO bus\n"); +		return ret; +	} + +	/* Fetch the PHY phandle */ +	priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); + +	/* Get the link mode */ +	priv->phy_interface = of_get_phy_mode(dn); + +	return 0; +} + +int bcmgenet_mii_init(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); +	int ret; + +	ret = bcmgenet_mii_alloc(priv); +	if (ret) +		return ret; + +	ret = bcmgenet_mii_of_init(priv); +	if (ret) +		goto out_free; + +	ret = bcmgenet_mii_probe(dev); +	if (ret) +		goto out; + +	return 0; + +out: +	mdiobus_unregister(priv->mii_bus); +out_free: +	kfree(priv->mii_bus->irq); +	mdiobus_free(priv->mii_bus); +	return ret; +} + +void bcmgenet_mii_exit(struct net_device *dev) +{ +	struct bcmgenet_priv *priv = netdev_priv(dev); + +	mdiobus_unregister(priv->mii_bus); +	kfree(priv->mii_bus->irq); +	mdiobus_free(priv->mii_bus); +} diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index c2777712da9..b61c14ed9b8 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -13,8 +13,7 @@   * GNU General Public License for more details.   *   * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA. + * along with this program; if not, see <http://www.gnu.org/licenses/>.   
*   *   * This driver is designed for the Broadcom SiByte SOC built-in @@ -36,7 +35,6 @@  #include <linux/netdevice.h>  #include <linux/etherdevice.h>  #include <linux/skbuff.h> -#include <linux/init.h>  #include <linux/bitops.h>  #include <linux/err.h>  #include <linux/ethtool.h> diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 12d961c4ebc..8afa579e7c4 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4,7 +4,7 @@   * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)   * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)   * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2013 Broadcom Corporation. + * Copyright (C) 2005-2014 Broadcom Corporation.   *   * Firmware is:   *	Derived from proprietary unpublished source code, @@ -25,7 +25,6 @@  #include <linux/slab.h>  #include <linux/delay.h>  #include <linux/in.h> -#include <linux/init.h>  #include <linux/interrupt.h>  #include <linux/ioport.h>  #include <linux/pci.h> @@ -37,6 +36,7 @@  #include <linux/mii.h>  #include <linux/phy.h>  #include <linux/brcmphy.h> +#include <linux/if.h>  #include <linux/if_vlan.h>  #include <linux/ip.h>  #include <linux/tcp.h> @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)  #define DRV_MODULE_NAME		"tg3"  #define TG3_MAJ_NUM			3 -#define TG3_MIN_NUM			133 +#define TG3_MIN_NUM			137  #define DRV_MODULE_VERSION	\  	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE	"Jul 29, 2013" +#define DRV_MODULE_RELDATE	"May 11, 2014"  #define RESET_KIND_SHUTDOWN	0  #define RESET_KIND_INIT		1 @@ -208,6 +208,9 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)  #define TG3_RAW_IP_ALIGN 2 +#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3) +#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 
2 : 1) +  #define TG3_FW_UPDATE_TIMEOUT_SEC	5  #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2) @@ -337,6 +340,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {  	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},  	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},  	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)}, +	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)}, +	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)}, +	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)}, +	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)}, +	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},  	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},  	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},  	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, @@ -1326,6 +1334,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)  	return err;  } +static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val) +{ +	return tg3_writephy(tp, MII_TG3_MISC_SHDW, +			    reg | val | MII_TG3_MISC_SHDW_WREN); +} +  static int tg3_bmcr_reset(struct tg3 *tp)  {  	u32 phy_control; @@ -1364,7 +1378,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)  	spin_lock_bh(&tp->lock); -	if (tg3_readphy(tp, reg, &val)) +	if (__tg3_readphy(tp, mii_id, reg, &val))  		val = -EIO;  	spin_unlock_bh(&tp->lock); @@ -1379,7 +1393,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)  	spin_lock_bh(&tp->lock); -	if (tg3_writephy(tp, reg, val)) +	if (__tg3_writephy(tp, mii_id, reg, val))  		ret = -EIO;  	spin_unlock_bh(&tp->lock); @@ -1387,17 +1401,12 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)  	return ret;  } -static int tg3_mdio_reset(struct mii_bus *bp) -{ -	return 0; -} -  static void tg3_mdio_config_5785(struct tg3 *tp)  {  	u32 val;  	struct phy_device *phydev; -	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +	phydev = tp->mdio_bus->phy_map[tp->phy_addr];  	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {  	case PHY_ID_BCM50610:  	case PHY_ID_BCM50610M: @@ -1502,6 +1511,13 @@ static int tg3_mdio_init(struct tg3 *tp)  				    TG3_CPMU_PHY_STRAP_IS_SERDES;  		if (is_serdes)  			tp->phy_addr += 7; +	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) { +		int addr; + +		addr = ssb_gige_get_phyaddr(tp->pdev); +		if (addr < 0) +			return addr; +		tp->phy_addr = addr;  	} else  		tp->phy_addr = TG3_PHY_MII_ADDR; @@ -1521,8 +1537,7 @@ static int tg3_mdio_init(struct tg3 *tp)  	tp->mdio_bus->parent   = &tp->pdev->dev;  	tp->mdio_bus->read     = &tg3_mdio_read;  	tp->mdio_bus->write    = &tg3_mdio_write; -	tp->mdio_bus->reset    = &tg3_mdio_reset; -	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); +	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);  	tp->mdio_bus->irq      = &tp->mdio_irq[0];  	for (i = 0; i < PHY_MAX_ADDR; i++) @@ -1543,7 +1558,7 @@ static int tg3_mdio_init(struct tg3 *tp)  		return i;  	} -	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +	phydev = tp->mdio_bus->phy_map[tp->phy_addr];  	if (!phydev || !phydev->drv) {  		dev_warn(&tp->pdev->dev, "No PHY devices\n"); @@ -1953,7 +1968,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)  	u32 old_tx_mode = tp->tx_mode;  	if (tg3_flag(tp, USE_PHYLIB)) -		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; +		autoneg = 
tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;  	else  		autoneg = tp->link_config.autoneg; @@ -1989,7 +2004,7 @@ static void tg3_adjust_link(struct net_device *dev)  	u8 oldflowctrl, linkmesg = 0;  	u32 mac_mode, lcl_adv, rmt_adv;  	struct tg3 *tp = netdev_priv(dev); -	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];  	spin_lock_bh(&tp->lock); @@ -2078,7 +2093,7 @@ static int tg3_phy_init(struct tg3 *tp)  	/* Bring the PHY back to a known state. */  	tg3_bmcr_reset(tp); -	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +	phydev = tp->mdio_bus->phy_map[tp->phy_addr];  	/* Attach the MAC to the PHY. */  	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), @@ -2105,7 +2120,7 @@ static int tg3_phy_init(struct tg3 *tp)  				      SUPPORTED_Asym_Pause);  		break;  	default: -		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);  		return -EINVAL;  	} @@ -2123,7 +2138,7 @@ static void tg3_phy_start(struct tg3 *tp)  	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))  		return; -	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +	phydev = tp->mdio_bus->phy_map[tp->phy_addr];  	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {  		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; @@ -2143,13 +2158,13 @@ static void tg3_phy_stop(struct tg3 *tp)  	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))  		return; -	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);  }  static void tg3_phy_fini(struct tg3 *tp)  {  	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { -		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);  		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;  	}  } @@ -2218,25 +2233,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)  		return;  	} -	reg = MII_TG3_MISC_SHDW_WREN | -	      MII_TG3_MISC_SHDW_SCR5_SEL | -	      MII_TG3_MISC_SHDW_SCR5_LPED | +	reg = MII_TG3_MISC_SHDW_SCR5_LPED |  	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |  	      MII_TG3_MISC_SHDW_SCR5_SDTL |  	      MII_TG3_MISC_SHDW_SCR5_C125OE;  	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)  		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD; -	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); +	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg); -	reg = MII_TG3_MISC_SHDW_WREN | -	      MII_TG3_MISC_SHDW_APD_SEL | -	      MII_TG3_MISC_SHDW_APD_WKTM_84MS; +	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;  	if (enable)  		reg |= MII_TG3_MISC_SHDW_APD_ENABLE; -	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg); +	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);  }  static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable) @@ -2592,13 +2603,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)  	tg3_writephy(tp, MII_CTRL1000, phy9_orig); -	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) { -		reg32 &= ~0x3000; -		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); -	} else if (!err) -		err = -EBUSY; +	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32); +	if (err) +		return err; -	return err; +	reg32 &= ~0x3000; +	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32); + +	return 0;  }  static void tg3_carrier_off(struct tg3 *tp) @@ -3212,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,  	return 0;  } -#define NVRAM_CMD_TIMEOUT 10000 +#define NVRAM_CMD_TIMEOUT 5000  static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)  { @@ -3220,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)  	
tw32(NVRAM_CMD, nvram_cmd);  	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { -		udelay(10); +		usleep_range(10, 40);  		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {  			udelay(10);  			break; @@ -3934,32 +3946,41 @@ static int tg3_load_tso_firmware(struct tg3 *tp)  	return 0;  } +/* tp->lock is held. */ +static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index) +{ +	u32 addr_high, addr_low; + +	addr_high = ((mac_addr[0] << 8) | mac_addr[1]); +	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) | +		    (mac_addr[4] <<  8) | mac_addr[5]); + +	if (index < 4) { +		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high); +		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low); +	} else { +		index -= 4; +		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high); +		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low); +	} +}  /* tp->lock is held. */  static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)  { -	u32 addr_high, addr_low; +	u32 addr_high;  	int i; -	addr_high = ((tp->dev->dev_addr[0] << 8) | -		     tp->dev->dev_addr[1]); -	addr_low = ((tp->dev->dev_addr[2] << 24) | -		    (tp->dev->dev_addr[3] << 16) | -		    (tp->dev->dev_addr[4] <<  8) | -		    (tp->dev->dev_addr[5] <<  0));  	for (i = 0; i < 4; i++) {  		if (i == 1 && skip_mac_1)  			continue; -		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); -		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); +		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);  	}  	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||  	    tg3_asic_rev(tp) == ASIC_REV_5704) { -		for (i = 0; i < 12; i++) { -			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); -			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); -		} +		for (i = 4; i < 16; i++) +			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);  	}  	addr_high = (tp->dev->dev_addr[0] + @@ -4027,7 +4048,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)  			struct phy_device *phydev;  			u32 phyid, advertising; -			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +			phydev = tp->mdio_bus->phy_map[tp->phy_addr];  			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; @@ -4389,9 +4410,12 @@ static void tg3_phy_copper_begin(struct tg3 *tp)  			if (tg3_flag(tp, WOL_SPEED_100MB))  				adv |= ADVERTISED_100baseT_Half |  				       ADVERTISED_100baseT_Full; -			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) -				adv |= ADVERTISED_1000baseT_Half | -				       ADVERTISED_1000baseT_Full; +			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) { +				if (!(tp->phy_flags & +				      TG3_PHYFLG_DISABLE_1G_HD_ADV)) +					adv |= ADVERTISED_1000baseT_Half; +				adv |= ADVERTISED_1000baseT_Full; +			}  			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;  		} else { @@ -6292,6 +6316,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {  	.n_alarm	= 0,  	.n_ext_ts	= 0,  	.n_per_out	= 1, +	.n_pins		= 0,  	.pps		= 0,  	.adjfreq	= tg3_ptp_adjfreq,  	.adjtime	= tg3_ptp_adjtime, @@ -6563,7 +6588,7 @@ static void tg3_tx(struct tg3_napi *tnapi)  		pkts_compl++;  		bytes_compl += skb->len; -		dev_kfree_skb(skb); +		dev_kfree_skb_any(skb);  		if (unlikely(tx_bug)) {  			tg3_tx_recover(tp); @@ -6813,8 +6838,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)  		work_mask |= opaque_key; -		if ((desc->err_vlan & RXD_ERR_MASK) != 0 && -		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { +		if (desc->err_vlan & RXD_ERR_MASK) {  		drop_it:  			tg3_recycle_rx(tnapi, tpr, opaque_key,  				       desc_idx, *post_ptr); @@ -6848,12 +6872,6 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)  			pci_unmap_single(tp->pdev, dma_addr, skb_size,  					 PCI_DMA_FROMDEVICE); -			skb = 
build_skb(data, frag_size); -			if (!skb) { -				tg3_frag_free(frag_size != 0, data); -				goto drop_it_no_recycle; -			} -			skb_reserve(skb, TG3_RX_OFFSET(tp));  			/* Ensure that the update to the data happens  			 * after the usage of the old DMA mapping.  			 */ @@ -6861,6 +6879,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)  			ri->data = NULL; +			skb = build_skb(data, frag_size); +			if (!skb) { +				tg3_frag_free(frag_size != 0, data); +				goto drop_it_no_recycle; +			} +			skb_reserve(skb, TG3_RX_OFFSET(tp));  		} else {  			tg3_recycle_rx(tnapi, tpr, opaque_key,  				       desc_idx, *post_ptr); @@ -6895,7 +6919,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)  		if (len > (tp->dev->mtu + ETH_HLEN) &&  		    skb->protocol != htons(ETH_P_8021Q)) { -			dev_kfree_skb(skb); +			dev_kfree_skb_any(skb);  			goto drop_it_no_recycle;  		} @@ -7608,7 +7632,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)  {  	u32 base = (u32) mapping & 0xffffffff; -	return (base > 0xffffdcc0) && (base + len + 8 < base); +	return base + len + 8 < base;  }  /* Test for TSO DMA buffers that cross into regions which are within MSS bytes @@ -7778,7 +7802,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,  					  PCI_DMA_TODEVICE);  		/* Make sure the mapping succeeded */  		if (pci_dma_mapping_error(tp->pdev, new_addr)) { -			dev_kfree_skb(new_skb); +			dev_kfree_skb_any(new_skb);  			ret = -1;  		} else {  			u32 save_entry = *entry; @@ -7793,13 +7817,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,  					    new_skb->len, base_flags,  					    mss, vlan)) {  				tg3_tx_skb_unmap(tnapi, save_entry, -1); -				dev_kfree_skb(new_skb); +				dev_kfree_skb_any(new_skb);  				ret = -1;  			}  		}  	} -	dev_kfree_skb(skb); +	dev_kfree_skb_any(skb);  	*pskb = new_skb;  	return ret;  } @@ -7830,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)  		netif_wake_queue(tp->dev);  	} -	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); -	if (IS_ERR(segs)) +	segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6)); +	if (IS_ERR(segs) || !segs)  		goto tg3_tso_bug_end;  	do { @@ -7842,14 +7866,12 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)  	} while (segs);  tg3_tso_bug_end: -	dev_kfree_skb(skb); +	dev_kfree_skb_any(skb);  	return NETDEV_TX_OK;  } -/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and - * support TG3_FLAG_HW_TSO_1 or firmware TSO only. 
- */ +/* hard_start_xmit for all devices */  static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  {  	struct tg3 *tp = netdev_priv(dev); @@ -7860,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  	struct tg3_napi *tnapi;  	struct netdev_queue *txq;  	unsigned int last; +	struct iphdr *iph = NULL; +	struct tcphdr *tcph = NULL; +	__sum16 tcp_csum = 0, ip_csum = 0; +	__be16 ip_tot_len = 0;  	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));  	tnapi = &tp->napi[skb_get_queue_mapping(skb)]; @@ -7891,11 +7917,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  	mss = skb_shinfo(skb)->gso_size;  	if (mss) { -		struct iphdr *iph;  		u32 tcp_opt_len, hdr_len; -		if (skb_header_cloned(skb) && -		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) +		if (skb_cow_head(skb, 0))  			goto drop;  		iph = ip_hdr(skb); @@ -7904,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;  		if (!skb_is_gso_v6(skb)) { +			if (unlikely((ETH_HLEN + hdr_len) > 80) && +			    tg3_flag(tp, TSO_BUG)) +				return tg3_tso_bug(tp, skb); + +			ip_csum = iph->check; +			ip_tot_len = iph->tot_len;  			iph->check = 0;  			iph->tot_len = htons(mss + hdr_len);  		} -		if (unlikely((ETH_HLEN + hdr_len) > 80) && -		    tg3_flag(tp, TSO_BUG)) -			return tg3_tso_bug(tp, skb); -  		base_flags |= (TXD_FLAG_CPU_PRE_DMA |  			       TXD_FLAG_CPU_POST_DMA); +		tcph = tcp_hdr(skb); +		tcp_csum = tcph->check; +  		if (tg3_flag(tp, HW_TSO_1) ||  		    tg3_flag(tp, HW_TSO_2) ||  		    tg3_flag(tp, HW_TSO_3)) { -			tcp_hdr(skb)->check = 0; +			tcph->check = 0;  			base_flags &= ~TXD_FLAG_TCPUDP_CSUM; -		} else -			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, -								 iph->daddr, 0, -								 IPPROTO_TCP, -								 0); +		} else { +			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, +							 0, IPPROTO_TCP, 0); +		}  		if (tg3_flag(tp, HW_TSO_3)) {  			mss |= (hdr_len & 0xc) << 12; @@ -8024,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)  	if (would_hit_hwbug) {  		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); +		if (mss) { +			/* If it's a TSO packet, do GSO instead of +			 * allocating and copying to a large linear SKB +			 */ +			if (ip_tot_len) { +				iph->check = ip_csum; +				iph->tot_len = ip_tot_len; +			} +			tcph->check = tcp_csum; +			return tg3_tso_bug(tp, skb); +		} +  		/* If the workaround fails due to memory/mapping  		 * failure, silently drop this packet.  		 
*/ @@ -8064,7 +8104,7 @@ dma_error:  	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);  	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;  drop: -	dev_kfree_skb(skb); +	dev_kfree_skb_any(skb);  drop_nofree:  	tp->tx_dropped++;  	return NETDEV_TX_OK; @@ -8911,6 +8951,49 @@ static void tg3_restore_pci_state(struct tg3 *tp)  	}  } +static void tg3_override_clk(struct tg3 *tp) +{ +	u32 val; + +	switch (tg3_asic_rev(tp)) { +	case ASIC_REV_5717: +		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); +		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val | +		     TG3_CPMU_MAC_ORIDE_ENABLE); +		break; + +	case ASIC_REV_5719: +	case ASIC_REV_5720: +		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); +		break; + +	default: +		return; +	} +} + +static void tg3_restore_clk(struct tg3 *tp) +{ +	u32 val; + +	switch (tg3_asic_rev(tp)) { +	case ASIC_REV_5717: +		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE); +		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, +		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE); +		break; + +	case ASIC_REV_5719: +	case ASIC_REV_5720: +		val = tr32(TG3_CPMU_CLCK_ORIDE); +		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); +		break; + +	default: +		return; +	} +} +  /* tp->lock is held. */  static int tg3_chip_reset(struct tg3 *tp)  { @@ -8918,6 +9001,9 @@ static int tg3_chip_reset(struct tg3 *tp)  	void (*write_op)(struct tg3 *, u32, u32);  	int i, err; +	if (!pci_device_is_present(tp->pdev)) +		return -ENODEV; +  	tg3_nvram_lock(tp);  	tg3_ape_lock(tp, TG3_APE_LOCK_GRC); @@ -8996,6 +9082,13 @@ static int tg3_chip_reset(struct tg3 *tp)  		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);  	} +	/* Set the clock to the highest frequency to avoid timeouts. With link +	 * aware mode, the clock speed could be slow and bootcode does not +	 * complete within the expected time. Override the clock to allow the +	 * bootcode to finish sooner and then restore it. +	 */ +	tg3_override_clk(tp); +  	/* Manage gphy power for all CPMU absent PCIe devices. */  	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))  		val |= GRC_MISC_CFG_KEEP_GPHY_POWER; @@ -9134,10 +9227,7 @@ static int tg3_chip_reset(struct tg3 *tp)  		tw32(0x7c00, val | (1 << 25));  	} -	if (tg3_asic_rev(tp) == ASIC_REV_5720) { -		val = tr32(TG3_CPMU_CLCK_ORIDE); -		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN); -	} +	tg3_restore_clk(tp);  	/* Reprobe ASF enable state.  */  	tg3_flag_clear(tp, ENABLE_ASF); @@ -9169,6 +9259,7 @@ static int tg3_chip_reset(struct tg3 *tp)  static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);  static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *); +static void __tg3_set_rx_mode(struct net_device *);  /* tp->lock is held. 
*/  static int tg3_halt(struct tg3 *tp, int kind, bool silent) @@ -9196,10 +9287,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)  		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));  	} -	if (err) -		return err; - -	return 0; +	return err;  }  static int tg3_set_mac_addr(struct net_device *dev, void *p) @@ -9232,6 +9320,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)  	}  	spin_lock_bh(&tp->lock);  	__tg3_set_mac_addr(tp, skip_mac_1); +	__tg3_set_rx_mode(dev);  	spin_unlock_bh(&tp->lock);  	return err; @@ -9620,6 +9709,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)  		tw32(MAC_HASH_REG_3, mc_filter[3]);  	} +	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) { +		rx_mode |= RX_MODE_PROMISC; +	} else if (!(dev->flags & IFF_PROMISC)) { +		/* Add all entries into to the mac addr filter list */ +		int i = 0; +		struct netdev_hw_addr *ha; + +		netdev_for_each_uc_addr(ha, dev) { +			__tg3_set_one_mac_addr(tp, ha->addr, +					       i + TG3_UCAST_ADDR_IDX(tp)); +			i++; +		} +	} +  	if (rx_mode != tp->rx_mode) {  		tp->rx_mode = rx_mode;  		tw32_f(MAC_RX_MODE, rx_mode); @@ -9952,6 +10055,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)  	if (tg3_asic_rev(tp) == ASIC_REV_5719)  		val |= BUFMGR_MODE_NO_TX_UNDERRUN;  	if (tg3_asic_rev(tp) == ASIC_REV_5717 || +	    tg3_asic_rev(tp) == ASIC_REV_5762 ||  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)  		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB; @@ -10618,10 +10722,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)  static ssize_t tg3_show_temp(struct device *dev,  			     struct device_attribute *devattr, char *buf)  { -	struct pci_dev *pdev = to_pci_dev(dev); -	struct net_device *netdev = pci_get_drvdata(pdev); -	struct tg3 *tp = netdev_priv(netdev);  	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); +	struct tg3 *tp = dev_get_drvdata(dev);  	u32 temperature;  	spin_lock_bh(&tp->lock); @@ -10639,29 +10741,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,  static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,  			  TG3_TEMP_MAX_OFFSET); -static struct attribute *tg3_attributes[] = { +static struct attribute *tg3_attrs[] = {  	&sensor_dev_attr_temp1_input.dev_attr.attr,  	&sensor_dev_attr_temp1_crit.dev_attr.attr,  	&sensor_dev_attr_temp1_max.dev_attr.attr,  	NULL  }; - -static const struct attribute_group tg3_group = { -	.attrs = tg3_attributes, -}; +ATTRIBUTE_GROUPS(tg3);  static void tg3_hwmon_close(struct tg3 *tp)  {  	if (tp->hwmon_dev) {  		hwmon_device_unregister(tp->hwmon_dev);  		tp->hwmon_dev = NULL; -		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);  	}  }  static void tg3_hwmon_open(struct tg3 *tp)  { -	int i, err; +	int i;  	u32 size = 0;  	struct pci_dev *pdev = tp->pdev;  	struct tg3_ocir ocirs[TG3_SD_NUM_RECS]; @@ -10679,18 +10777,11 @@ static void tg3_hwmon_open(struct tg3 *tp)  	if (!size)  		return; -	/* Register hwmon sysfs hooks */ -	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group); -	if (err) { -		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n"); -		return; -	} - -	tp->hwmon_dev = hwmon_device_register(&pdev->dev); +	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3", +							  tp, tg3_groups);  	if (IS_ERR(tp->hwmon_dev)) {  		tp->hwmon_dev = NULL;  		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n"); -		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);  	}  } @@ -10750,6 +10841,7 @@ 
static void tg3_periodic_fetch_stats(struct tg3 *tp)  	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);  	if (tg3_asic_rev(tp) != ASIC_REV_5717 && +	    tg3_asic_rev(tp) != ASIC_REV_5762 &&  	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&  	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {  		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT); @@ -10878,6 +10970,13 @@ static void tg3_timer(unsigned long __opaque)  		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&  			   tg3_flag(tp, 5780_CLASS)) {  			tg3_serdes_parallel_detect(tp); +		} else if (tg3_flag(tp, POLL_CPMU_LINK)) { +			u32 cpmu = tr32(TG3_CPMU_STATUS); +			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) == +					 TG3_CPMU_STATUS_LINK_MASK); + +			if (link_up != tp->link_up) +				tg3_setup_phy(tp, false);  		}  		tp->timer_counter = tp->timer_multiplier; @@ -11035,7 +11134,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)  		name = tp->dev->name;  	else {  		name = &tnapi->irq_lbl[0]; -		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num); +		if (tnapi->tx_buffers && tnapi->rx_rcb) +			snprintf(name, IFNAMSIZ, +				 "%s-txrx-%d", tp->dev->name, irq_num); +		else if (tnapi->tx_buffers) +			snprintf(name, IFNAMSIZ, +				 "%s-tx-%d", tp->dev->name, irq_num); +		else if (tnapi->rx_rcb) +			snprintf(name, IFNAMSIZ, +				 "%s-rx-%d", tp->dev->name, irq_num); +		else +			snprintf(name, IFNAMSIZ, +				 "%s-%d", tp->dev->name, irq_num);  		name[IFNAMSIZ-1] = 0;  	} @@ -11262,12 +11372,10 @@ static bool tg3_enable_msix(struct tg3 *tp)  		msix_ent[i].vector = 0;  	} -	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt); +	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);  	if (rc < 0) {  		return false; -	} else if (rc != 0) { -		if (pci_enable_msix(tp->pdev, msix_ent, rc)) -			return false; +	} else if (rc < tp->irq_cnt) {  		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",  			      tp->irq_cnt, rc);  		tp->irq_cnt = rc; @@ -11572,10 +11680,11 @@ static int tg3_close(struct net_device *dev)  	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));  	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev)); -	tg3_power_down_prepare(tp); - -	tg3_carrier_off(tp); +	if (pci_device_is_present(tp->pdev)) { +		tg3_power_down_prepare(tp); +		tg3_carrier_off(tp); +	}  	return 0;  } @@ -11733,8 +11842,6 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)  		get_stat64(&hw_stats->rx_frame_too_long_errors) +  		get_stat64(&hw_stats->rx_undersize_packets); -	stats->rx_over_errors = old_stats->rx_over_errors + -		get_stat64(&hw_stats->rxbds_empty);  	stats->rx_frame_errors = old_stats->rx_frame_errors +  		get_stat64(&hw_stats->rx_align_errors);  	stats->tx_aborted_errors = old_stats->tx_aborted_errors + @@ -11786,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)  static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)  {  	struct tg3 *tp = netdev_priv(dev); -	int ret; +	int ret, cpmu_restore = 0;  	u8  *pd; -	u32 i, offset, len, b_offset, b_count; +	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;  	__be32 val;  	if (tg3_flag(tp, NO_NVRAM)) @@ -11800,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,  	eeprom->magic = TG3_EEPROM_MAGIC; +	/* Override clock, link aware and link idle modes */ +	if (tg3_flag(tp, CPMU_PRESENT)) { +		cpmu_val = tr32(TG3_CPMU_CTRL); +		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE | +				CPMU_CTRL_LINK_IDLE_MODE)) { +			
tw32(TG3_CPMU_CTRL, cpmu_val & +					    ~(CPMU_CTRL_LINK_AWARE_MODE | +					     CPMU_CTRL_LINK_IDLE_MODE)); +			cpmu_restore = 1; +		} +	} +	tg3_override_clk(tp); +  	if (offset & 3) {  		/* adjustments to start on required 4 byte boundary */  		b_offset = offset & 3; @@ -11810,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,  		}  		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);  		if (ret) -			return ret; +			goto eeprom_done;  		memcpy(data, ((char *)&val) + b_offset, b_count);  		len -= b_count;  		offset += b_count; @@ -11822,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,  	for (i = 0; i < (len - (len & 3)); i += 4) {  		ret = tg3_nvram_read_be32(tp, offset + i, &val);  		if (ret) { +			if (i) +				i -= 4;  			eeprom->len += i; -			return ret; +			goto eeprom_done;  		}  		memcpy(pd + i, &val, 4); +		if (need_resched()) { +			if (signal_pending(current)) { +				eeprom->len += i; +				ret = -EINTR; +				goto eeprom_done; +			} +			cond_resched(); +		}  	}  	eeprom->len += i; @@ -11836,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,  		b_offset = offset + len - b_count;  		ret = tg3_nvram_read_be32(tp, b_offset, &val);  		if (ret) -			return ret; +			goto eeprom_done;  		memcpy(pd, &val, b_count);  		eeprom->len += b_count;  	} -	return 0; +	ret = 0; + +eeprom_done: +	/* Restore clock, link aware and link idle modes */ +	tg3_restore_clk(tp); +	if (cpmu_restore) +		tw32(TG3_CPMU_CTRL, cpmu_val); + +	return ret;  }  static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) @@ -11907,7 +12045,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)  		struct phy_device *phydev;  		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))  			return -EAGAIN; -		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +		phydev = tp->mdio_bus->phy_map[tp->phy_addr];  		return phy_ethtool_gset(phydev, cmd);  	} @@ -11974,7 +12112,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)  		struct phy_device *phydev;  		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))  			return -EAGAIN; -		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +		phydev = tp->mdio_bus->phy_map[tp->phy_addr];  		return phy_ethtool_sset(phydev, cmd);  	} @@ -12093,12 +12231,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)  	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC); -	spin_lock_bh(&tp->lock);  	if (device_may_wakeup(dp))  		tg3_flag_set(tp, WOL_ENABLE);  	else  		tg3_flag_clear(tp, WOL_ENABLE); -	spin_unlock_bh(&tp->lock);  	return 0;  } @@ -12131,7 +12267,7 @@ static int tg3_nway_reset(struct net_device *dev)  	if (tg3_flag(tp, USE_PHYLIB)) {  		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))  			return -EAGAIN; -		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); +		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);  	} else {  		u32 bmcr; @@ -12198,7 +12334,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e  	if (tg3_flag(tp, MAX_RXPEND_64) &&  	    tp->rx_pending > 63)  		tp->rx_pending = 63; -	tp->rx_jumbo_pending = ering->rx_jumbo_pending; + +	if (tg3_flag(tp, JUMBO_RING_ENABLE)) +		tp->rx_jumbo_pending = ering->rx_jumbo_pending;  	for (i = 0; i < tp->irq_max; i++)  		tp->napi[i].tx_pending = ering->tx_pending; @@ -12247,7 +12385,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct 
ethtool_pauseparam  		u32 newadv;  		struct phy_device *phydev; -		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +		phydev = tp->mdio_bus->phy_map[tp->phy_addr];  		if (!(phydev->supported & SUPPORTED_Pause) ||  		    (!(phydev->supported & SUPPORTED_Asym_Pause) && @@ -12394,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)  	return size;  } -static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir) +static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)  {  	struct tg3 *tp = netdev_priv(dev);  	int i; @@ -12405,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)  	return 0;  } -static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir) +static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)  {  	struct tg3 *tp = netdev_priv(dev);  	size_t i; @@ -13194,8 +13332,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)  		return -ENOMEM;  	tx_data = skb_put(skb, tx_len); -	memcpy(tx_data, tp->dev->dev_addr, 6); -	memset(tx_data + 6, 0x0, 8); +	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN); +	memset(tx_data + ETH_ALEN, 0x0, 8);  	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN); @@ -13583,14 +13721,13 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,  } -static int tg3_hwtstamp_ioctl(struct net_device *dev, -			      struct ifreq *ifr, int cmd) +static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)  {  	struct tg3 *tp = netdev_priv(dev);  	struct hwtstamp_config stmpconf;  	if (!tg3_flag(tp, PTP_CAPABLE)) -		return -EINVAL; +		return -EOPNOTSUPP;  	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))  		return -EFAULT; @@ -13598,16 +13735,9 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,  	if (stmpconf.flags)  		return -EINVAL; -	switch (stmpconf.tx_type) { -	case HWTSTAMP_TX_ON: -		tg3_flag_set(tp, TX_TSTAMP_EN); -		break; -	case HWTSTAMP_TX_OFF: -		tg3_flag_clear(tp, TX_TSTAMP_EN); -		break; -	default: +	if (stmpconf.tx_type != HWTSTAMP_TX_ON && +	    stmpconf.tx_type != HWTSTAMP_TX_OFF)  		return -ERANGE; -	}  	switch (stmpconf.rx_filter) {  	case HWTSTAMP_FILTER_NONE: @@ -13669,6 +13799,72 @@ static int tg3_hwtstamp_ioctl(struct net_device *dev,  		tw32(TG3_RX_PTP_CTL,  		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK); +	if (stmpconf.tx_type == HWTSTAMP_TX_ON) +		tg3_flag_set(tp, TX_TSTAMP_EN); +	else +		tg3_flag_clear(tp, TX_TSTAMP_EN); + +	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? +		-EFAULT : 0; +} + +static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ +	struct tg3 *tp = netdev_priv(dev); +	struct hwtstamp_config stmpconf; + +	if (!tg3_flag(tp, PTP_CAPABLE)) +		return -EOPNOTSUPP; + +	stmpconf.flags = 0; +	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ? 
+			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF); + +	switch (tp->rxptpctl) { +	case 0: +		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ; +		break; +	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ: +		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; +		break; +	default: +		WARN_ON_ONCE(1); +		return -ERANGE; +	} +  	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?  		-EFAULT : 0;  } @@ -13683,7 +13879,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)  		struct phy_device *phydev;  		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))  			return -EAGAIN; -		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +		phydev = tp->mdio_bus->phy_map[tp->phy_addr];  		return phy_mii_ioctl(phydev, ifr, cmd);  	} @@ -13726,7 +13922,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)  		return err;  	case SIOCSHWTSTAMP: -		return tg3_hwtstamp_ioctl(dev, ifr, cmd); +		return tg3_hwtstamp_set(dev, ifr); + +	case SIOCGHWTSTAMP: +		return tg3_hwtstamp_get(dev, ifr);  	default:  		/* do nothing */ @@ -13876,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {  	.get_sset_count		= tg3_get_sset_count,  	.get_rxnfc		= tg3_get_rxnfc,  	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size, -	.get_rxfh_indir		= tg3_get_rxfh_indir, -	.set_rxfh_indir		= tg3_set_rxfh_indir, +	.get_rxfh		= tg3_get_rxfh, +	.set_rxfh		= tg3_set_rxfh,  	.get_channels		= tg3_get_channels,  	.set_channels		= tg3_set_channels,  	.get_ts_info		= tg3_get_ts_info, @@ -13956,12 +14155,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)  	tg3_netif_stop(tp); +	tg3_set_mtu(dev, tp, new_mtu); +  	tg3_full_lock(tp, 1);  	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); -	tg3_set_mtu(dev, tp, new_mtu); -  	/* Reset PHY, otherwise the read DMA engine will be in a mode that  	 * breaks all requests to 256 bytes.  	 
*/ @@ -14847,7 +15046,8 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)  	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);  	if (val == NIC_SRAM_DATA_SIG_MAGIC) {  		u32 nic_cfg, led_cfg; -		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id; +		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0; +		u32 nic_phy_id, ver, eeprom_phy_id;  		int eeprom_phy_serdes = 0;  		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg); @@ -14864,6 +15064,11 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)  		if (tg3_asic_rev(tp) == ASIC_REV_5785)  			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4); +		if (tg3_asic_rev(tp) == ASIC_REV_5717 || +		    tg3_asic_rev(tp) == ASIC_REV_5719 || +		    tg3_asic_rev(tp) == ASIC_REV_5720) +			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5); +  		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==  		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)  			eeprom_phy_serdes = 1; @@ -14921,6 +15126,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)  			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)  				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |  						 LED_CTRL_MODE_PHY_2); + +			if (tg3_flag(tp, 5717_PLUS) || +			    tg3_asic_rev(tp) == ASIC_REV_5762) +				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE | +						LED_CTRL_BLINK_RATE_MASK; +  			break;  		case SHASTA_EXT_LED_MAC: @@ -15010,6 +15221,9 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)  			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);  		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)  			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN); + +		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV) +			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;  	}  done:  	if (tg3_flag(tp, WOL_CAP)) @@ -15105,9 +15319,11 @@ static void tg3_phy_init_link_config(struct tg3 *tp)  {  	u32 adv = ADVERTISED_Autoneg; -	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) -		adv |= ADVERTISED_1000baseT_Half | -		       ADVERTISED_1000baseT_Full; +	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { +		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV)) +			adv |= ADVERTISED_1000baseT_Half; +		adv |= ADVERTISED_1000baseT_Full; +	}  	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))  		adv |= ADVERTISED_100baseT_Half | @@ -15759,9 +15975,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)  		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||  		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||  		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || +		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || +		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||  		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||  		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || -		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) +		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || +		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)  			reg = TG3PCI_GEN2_PRODID_ASICREV;  		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||  			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || @@ -16452,6 +16671,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)  	/* Set these bits to enable statistics workaround. */  	if (tg3_asic_rev(tp) == ASIC_REV_5717 || +	    tg3_asic_rev(tp) == ASIC_REV_5762 ||  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||  	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {  		tp->coalesce_mode |= HOSTCC_MODE_ATTN; @@ -16485,6 +16705,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)  	/* Clear this out for sanity. */  	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); +	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. 
*/ +	tw32(TG3PCI_REG_BASE_ADDR, 0); +  	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,  			      &pci_state_reg);  	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && @@ -16591,6 +16814,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)  	else  		tg3_flag_clear(tp, POLL_SERDES); +	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF)) +		tg3_flag_set(tp, POLL_CPMU_LINK); +  	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;  	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;  	if (tg3_asic_rev(tp) == ASIC_REV_5701 && @@ -16632,8 +16858,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)  	int len;  	addr = of_get_property(dp, "local-mac-address", &len); -	if (addr && len == 6) { -		memcpy(dev->dev_addr, addr, 6); +	if (addr && len == ETH_ALEN) { +		memcpy(dev->dev_addr, addr, ETH_ALEN);  		return 0;  	}  	return -ENODEV; @@ -16643,7 +16869,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)  {  	struct net_device *dev = tp->dev; -	memcpy(dev->dev_addr, idprom->id_ethaddr, 6); +	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);  	return 0;  }  #endif @@ -17052,10 +17278,6 @@ static int tg3_test_dma(struct tg3 *tp)  	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); -#if 0 -	/* Unneeded, already done by tg3_get_invariants.  */ -	tg3_switch_clocks(tp); -#endif  	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&  	    tg3_asic_rev(tp) != ASIC_REV_5701) @@ -17083,20 +17305,6 @@ static int tg3_test_dma(struct tg3 *tp)  			break;  		} -#if 0 -		/* validate data reached card RAM correctly. */ -		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) { -			u32 val; -			tg3_read_mem(tp, 0x2100 + (i*4), &val); -			if (le32_to_cpu(val) != p[i]) { -				dev_err(&tp->pdev->dev, -					"%s: Buffer corrupted on device! " -					"(%d != %d)\n", __func__, val, i); -				/* ret = -ENODEV here? */ -			} -			p[i] = 0; -		} -#endif  		/* Now read it back. */  		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);  		if (ret) { @@ -17362,8 +17570,10 @@ static int tg3_init_one(struct pci_dev *pdev,  			tg3_flag_set(tp, FLUSH_POSTED_WRITES);  		if (ssb_gige_one_dma_at_once(pdev))  			tg3_flag_set(tp, ONE_DMA_AT_ONCE); -		if (ssb_gige_have_roboswitch(pdev)) +		if (ssb_gige_have_roboswitch(pdev)) { +			tg3_flag_set(tp, USE_PHYLIB);  			tg3_flag_set(tp, ROBOSWITCH); +		}  		if (ssb_gige_is_rgmii(pdev))  			tg3_flag_set(tp, RGMII_MODE);  	} @@ -17409,9 +17619,12 @@ static int tg3_init_one(struct pci_dev *pdev,  	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||  	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||  	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 || +	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 || +	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||  	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||  	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 || -	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) { +	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 || +	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {  		tg3_flag_set(tp, ENABLE_APE);  		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);  		if (!tp->aperegs) { @@ -17478,8 +17691,6 @@ static int tg3_init_one(struct pci_dev *pdev,  	tg3_init_bufmgr_config(tp); -	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; -  	/* 5700 B0 chips do not support checksumming correctly due  	 * to hardware bugs.  	 
*/ @@ -17511,7 +17722,8 @@ static int tg3_init_one(struct pci_dev *pdev,  			features |= NETIF_F_TSO_ECN;  	} -	dev->features |= features; +	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | +			 NETIF_F_HW_VLAN_CTAG_RX;  	dev->vlan_features |= features;  	/* @@ -17525,6 +17737,7 @@ static int tg3_init_one(struct pci_dev *pdev,  		features |= NETIF_F_LOOPBACK;  	dev->hw_features |= features; +	dev->priv_flags |= IFF_UNICAST_FLT;  	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&  	    !tg3_flag(tp, TSO_CAPABLE) && @@ -17628,7 +17841,7 @@ static int tg3_init_one(struct pci_dev *pdev,  	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {  		struct phy_device *phydev; -		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; +		phydev = tp->mdio_bus->phy_map[tp->phy_addr];  		netdev_info(dev,  			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",  			    phydev->drv->name, dev_name(&phydev->dev)); @@ -17685,7 +17898,6 @@ err_out_free_res:  err_out_disable_pdev:  	if (pci_is_enabled(pdev))  		pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  	return err;  } @@ -17717,7 +17929,6 @@ static void tg3_remove_one(struct pci_dev *pdev)  		free_netdev(dev);  		pci_release_regions(pdev);  		pci_disable_device(pdev); -		pci_set_drvdata(pdev, NULL);  	}  } @@ -17727,10 +17938,12 @@ static int tg3_suspend(struct device *device)  	struct pci_dev *pdev = to_pci_dev(device);  	struct net_device *dev = pci_get_drvdata(pdev);  	struct tg3 *tp = netdev_priv(dev); -	int err; +	int err = 0; + +	rtnl_lock();  	if (!netif_running(dev)) -		return 0; +		goto unlock;  	tg3_reset_task_cancel(tp);  	tg3_phy_stop(tp); @@ -17772,6 +17985,8 @@ out:  			tg3_phy_start(tp);  	} +unlock: +	rtnl_unlock();  	return err;  } @@ -17780,10 +17995,12 @@ static int tg3_resume(struct device *device)  	struct pci_dev *pdev = to_pci_dev(device);  	struct net_device *dev = pci_get_drvdata(pdev);  	struct tg3 *tp = netdev_priv(dev); -	int err; +	int err = 0; + +	rtnl_lock();  	if (!netif_running(dev)) -		return 0; +		goto unlock;  	netif_device_attach(dev); @@ -17807,6 +18024,8 @@ out:  	if (!err)  		tg3_phy_start(tp); +unlock: +	rtnl_unlock();  	return err;  }  #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 70257808aa3..461accaf0aa 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -4,7 +4,7 @@   * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)   * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)   * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2013 Broadcom Corporation. + * Copyright (C) 2007-2014 Broadcom Corporation.   
*/  #ifndef _T3_H @@ -68,6 +68,9 @@  #define  TG3PCI_DEVICE_TIGON3_5762	 0x1687  #define  TG3PCI_DEVICE_TIGON3_5725	 0x1643  #define  TG3PCI_DEVICE_TIGON3_5727	 0x16f3 +#define  TG3PCI_DEVICE_TIGON3_57764	 0x1642 +#define  TG3PCI_DEVICE_TIGON3_57767	 0x1683 +#define  TG3PCI_DEVICE_TIGON3_57787	 0x1641  /* 0x04 --> 0x2c unused */  #define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM  #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644 @@ -1143,10 +1146,14 @@  #define TG3_CPMU_CLCK_ORIDE		0x00003624  #define  CPMU_CLCK_ORIDE_MAC_ORIDE_EN	 0x80000000 +#define TG3_CPMU_CLCK_ORIDE_ENABLE	0x00003628 +#define  TG3_CPMU_MAC_ORIDE_ENABLE	 (1 << 13) +  #define TG3_CPMU_STATUS			0x0000362c  #define  TG3_CPMU_STATUS_FMSK_5717	 0x20000000  #define  TG3_CPMU_STATUS_FMSK_5719	 0xc0000000  #define  TG3_CPMU_STATUS_FSHFT_5719	 30 +#define  TG3_CPMU_STATUS_LINK_MASK	 0x180000  #define TG3_CPMU_CLCK_STAT		0x00003630  #define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000 @@ -2201,7 +2208,7 @@  #define NIC_SRAM_DATA_CFG_2		0x00000d38 -#define  NIC_SRAM_DATA_CFG_2_APD_EN	 0x00000400 +#define  NIC_SRAM_DATA_CFG_2_APD_EN	 0x00004000  #define  SHASTA_EXT_LED_MODE_MASK	 0x00018000  #define  SHASTA_EXT_LED_LEGACY		 0x00000000  #define  SHASTA_EXT_LED_SHARED		 0x00008000 @@ -2223,6 +2230,9 @@  #define  NIC_SRAM_CPMUSTAT_SIG		0x0000362c  #define  NIC_SRAM_CPMUSTAT_SIG_MSK	0x0000ffff +#define NIC_SRAM_DATA_CFG_5		0x00000e0c +#define  NIC_SRAM_DISABLE_1G_HALF_ADV	0x00000002 +  #define NIC_SRAM_RX_MINI_BUFFER_DESC	0x00001000  #define NIC_SRAM_DMA_DESC_POOL_BASE	0x00002000 @@ -2598,7 +2608,11 @@ struct tg3_rx_buffer_desc {  #define RXD_ERR_TOO_SMALL		0x00400000  #define RXD_ERR_NO_RESOURCES		0x00800000  #define RXD_ERR_HUGE_FRAME		0x01000000 -#define RXD_ERR_MASK			0xffff0000 + +#define RXD_ERR_MASK	(RXD_ERR_BAD_CRC | RXD_ERR_COLLISION |		\ +			 RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE |	\ +			 RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL |		\ +			 RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)  	u32				reserved;  	u32				opaque; @@ -3011,6 +3025,7 @@ enum TG3_FLAGS {  	TG3_FLAG_ENABLE_ASF,  	TG3_FLAG_ASPM_WORKAROUND,  	TG3_FLAG_POLL_SERDES, +	TG3_FLAG_POLL_CPMU_LINK,  	TG3_FLAG_MBOX_WRITE_REORDER,  	TG3_FLAG_PCIX_TARGET_HWBUG,  	TG3_FLAG_WOL_SPEED_100MB, @@ -3322,6 +3337,7 @@ struct tg3 {  #define TG3_PHYFLG_1G_ON_VAUX_OK	0x00080000  #define TG3_PHYFLG_KEEP_LINK_ON_PWRDN	0x00100000  #define TG3_PHYFLG_MDIX_STATE		0x00200000 +#define TG3_PHYFLG_DISABLE_1G_HD_ADV	0x00400000  	u32				led_ctrl;  	u32				phy_otp;  | 
