Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c501.c | 2
-rw-r--r-- drivers/net/ethernet/8390/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/8390/Makefile | 1
-rw-r--r-- drivers/net/ethernet/8390/apne.c | 2
-rw-r--r-- drivers/net/ethernet/8390/mcf8390.c | 480
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 8
-rw-r--r-- drivers/net/ethernet/amd/declance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/lance.c | 5
-rw-r--r-- drivers/net/ethernet/apple/macmace.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 8
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_hw.h | 5
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 93
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 105
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_param.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 45
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 58
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atlx.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 96
-rw-r--r-- drivers/net/ethernet/broadcom/b44.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 100
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.h | 45
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 197
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 252
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 63
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 30
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 585
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 184
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 1232
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 53
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 310
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h | 168
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 128
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 42
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 68
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 53
-rw-r--r-- drivers/net/ethernet/broadcom/cnic_if.h | 13
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 284
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 47
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_cee.c | 97
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_cs.h | 34
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_defs.h | 63
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_defs_cna.h | 15
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h | 35
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_defs_status.h | 3
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 393
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc.h | 43
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 48
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfa_msgq.c | 4
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi.h | 81
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi_cna.h | 42
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi_enet.h | 107
-rw-r--r-- drivers/net/ethernet/brocade/bna/bfi_reg.h | 4
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna.h | 51
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_enet.c | 15
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 33
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 17
-rw-r--r-- drivers/net/ethernet/brocade/bna/bna_types.h | 66
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 12
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.h | 4
-rw-r--r-- drivers/net/ethernet/brocade/bna/cna_fwimg.c | 4
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 13
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 35
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 30
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 5
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 31
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 171
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 57
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 9
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 518
-rw-r--r-- drivers/net/ethernet/ethoc.c | 6
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 9
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 11
-rw-r--r-- drivers/net/ethernet/freescale/fec.c | 32
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 29
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 491
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 420
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r-- drivers/net/ethernet/hp/hp100.c | 6
-rw-r--r-- drivers/net/ethernet/i825xx/lp486e.c | 8
-rw-r--r-- drivers/net/ethernet/i825xx/sun3_82586.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 40
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 21
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 14
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 45
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 43
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 1
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 25
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 52
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 164
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 12
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 1
-rw-r--r-- drivers/net/ethernet/intel/igbvf/vf.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ids.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 16
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/Makefile | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 100
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 48
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 78
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 26
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 159
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 395
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 838
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 769
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 23
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 187
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 223
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 46
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/defines.h | 28
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 172
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 114
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1371
-rw-r--r-- drivers/net/ethernet/jme.c | 14
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/sky2.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 382
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 630
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 270
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 91
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 523
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 47
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 108
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/profile.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 285
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_mll.c | 35
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 10
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 24
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 8
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.h | 2
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 23
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.h | 3
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 5
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 10
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 18
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 12
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 42
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 37
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 315
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 100
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 16
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 1002
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 371
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 77
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/enum.h | 8
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/falcon.c | 35
-rw-r--r-- drivers/net/ethernet/sfc/falcon_xmac.c | 12
-rw-r--r-- drivers/net/ethernet/sfc/filter.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_mon.c | 1
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_pcol.h | 3
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 11
-rw-r--r-- drivers/net/ethernet/sfc/nic.c | 11
-rw-r--r-- drivers/net/ethernet/sfc/nic.h | 18
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 23
-rw-r--r-- drivers/net/ethernet/sfc/selftest.c | 64
-rw-r--r-- drivers/net/ethernet/sfc/siena.c | 37
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 93
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 19
-rw-r--r-- drivers/net/ethernet/smsc/smsc9420.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 31
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 20
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 101
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 57
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 196
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 12
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 6
-rw-r--r-- drivers/net/ethernet/sun/sunbmac.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 1
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 3
-rw-r--r-- drivers/net/ethernet/sun/sunqe.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 177
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 25
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 208
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 25
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 61
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 6
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 2
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r-- drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 3
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
240 files changed, 12214 insertions, 6853 deletions
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index bf73e1a0229..2038eaabaea 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -143,7 +143,7 @@ static int irq = 5;
static int mem_start;
/**
- * el1_probe: - probe for a 3c501
+ * el1_probe - probe for a 3c501
* @dev: The device structure passed in to probe.
*
* This can be called from two places. The network layer will probe using
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2e538676924..e1219e037c0 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -162,6 +162,20 @@ config MAC8390
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
+config MCF8390
+ tristate "ColdFire NS8390 based Ethernet support"
+ depends on COLDFIRE
+ select CRC32
+ ---help---
+ This driver is for Ethernet devices using an NS8390-compatible
+ chipset on many common ColdFire CPU based boards. Many of the older
+ Freescale dev boards use this, and some other common boards like
+ some SnapGear routers do as well.
+
+ If you have one of these boards and want to use the network interface
+ on them, then choose Y. To compile this driver as a module, choose M
+ here: the module will be called mcf8390.
+
config NE2000
tristate "NE2000/NE1000 support"
depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index d13790b7fd2..f43038babf8 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 923959275a8..912ed7a5f33 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -454,7 +454,7 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
}
} else {
- ptrc = (char*)buf;
+ ptrc = buf;
for (cnt = 0; cnt < count; cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
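A note on the pattern: the apne.c hunk above, like several later hunks in greth.c, declance.c and the atl1e ring code, simply drops a cast that C already performs implicitly, or that converts a pointer to its own type. A minimal user-space sketch of why such casts are pure noise; the names here are illustrative, not from the patch:

#include <stdlib.h>

struct ring { int count; };

int main(void)
{
	void *mem = malloc(sizeof(struct ring));

	struct ring *r = mem;			/* fine: void * converts implicitly in C */
	struct ring *s = (struct ring *)mem;	/* the removed casts were like this one:
						   redundant, and able to hide a real
						   type error if mem's type later changes */
	if (r && s)
		r->count = 0;
	free(mem);
	return 0;
}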
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
new file mode 100644
index 00000000000..230efd6fa5d
--- /dev/null
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -0,0 +1,480 @@
+/*
+ * Support for ColdFire CPU based boards using a NS8390 Ethernet device.
+ *
+ * Derived from the many other 8390 drivers.
+ *
+ * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <asm/mcf8390.h>
+
+static const char version[] =
+ "mcf8390.c: (15-06-2012) Greg Ungerer <gerg@uclinux.org>";
+
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear */
+#define NE_EN0_ISR 0x07
+#define NE_EN0_DCFG 0x0e
+#define NE_EN0_RSARLO 0x08
+#define NE_EN0_RSARHI 0x09
+#define NE_EN0_RCNTLO 0x0a
+#define NE_EN0_RXCR 0x0c
+#define NE_EN0_TXCR 0x0d
+#define NE_EN0_RCNTHI 0x0b
+#define NE_EN0_IMR 0x0f
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#ifdef NE2000_ODDOFFSET
+/*
+ * A lot of the ColdFire boards use a separate address region for odd offset
+ * register addresses. The following functions convert and map as required.
+ * Note that the data port accesses are treated a little differently, and
+ * always accessed via the insX/outsX functions.
+ */
+static inline u32 NE_PTR(u32 addr)
+{
+ if (addr & 1)
+ return addr - 1 + NE2000_ODDOFFSET;
+ return addr;
+}
+
+static inline u32 NE_DATA_PTR(u32 addr)
+{
+ return addr;
+}
+
+void ei_outb(u32 val, u32 addr)
+{
+ NE2000_BYTE *rp;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ *rp = RSWAP(val);
+}
+
+#define ei_inb ei_inb
+u8 ei_inb(u32 addr)
+{
+ NE2000_BYTE *rp, val;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ val = *rp;
+ return (u8) (RSWAP(val) & 0xff);
+}
+
+void ei_insb(u32 addr, void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *rp;
+ *buf++ = RSWAP(val);
+ }
+}
+
+void ei_insw(u32 addr, void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *rp;
+ *buf++ = BSWAP(w);
+ }
+}
+
+void ei_outsb(u32 addr, const void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *buf++;
+ *rp = RSWAP(val);
+ }
+}
+
+void ei_outsw(u32 addr, const void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *buf++;
+ *rp = BSWAP(w);
+ }
+}
+
+#else /* !NE2000_ODDOFFSET */
+
+#define ei_inb inb
+#define ei_outb outb
+#define ei_insb insb
+#define ei_insw insw
+#define ei_outsb outsb
+#define ei_outsw outsw
+
+#endif /* !NE2000_ODDOFFSET */
+
+#define ei_inb_p ei_inb
+#define ei_outb_p ei_outb
+
+#include "lib8390.c"
+
+/*
+ * Hard reset the card. This used to pause for the same period that an
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void mcf8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+ u32 addr = dev->base_addr;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+
+ ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RESET) == 0) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "%s: did not complete\n", __func__);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RESET, addr + NE_EN0_ISR);
+}
+
+/*
+ * This *shouldn't* happen.
+ * If it does, it's the last thing you'll see.
+ */
+static void mcf8390_dmaing_err(const char *func, struct net_device *dev,
+ struct ei_device *ei_local)
+{
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ func, ei_local->dmaing, ei_local->irqlock);
+}
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+static void mcf8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(sizeof(struct e8390_pkt_hdr), addr + NE_EN0_RCNTLO);
+ ei_outb(0, addr + NE_EN0_RCNTHI);
+ ei_outb(0, addr + NE_EN0_RSARLO); /* On page boundary */
+ ei_outb(ring_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+
+ hdr->count = cpu_to_le16(hdr->count);
+}
+
+/*
+ * Block input and output, similar to the Crynwr packet driver.
+ * If you are porting to a new ethercard, look at the packet driver source
+ * for hints. The NEx000 doesn't share the on-board packet memory --
+ * you have to put the packet out through the "remote DMA" dataport
+ * using ei_outsw().
+ */
+static void mcf8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, addr + NE_EN0_RSARLO);
+ ei_outb(ring_offset >> 8, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, buf, count >> 1);
+ if (count & 1)
+ buf[count - 1] = ei_inb(addr + NE_DATAPORT);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static void mcf8390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ unsigned long dma_start;
+
+ /* Make sure we transfer all bytes when doing 16-bit I/O writes */
+ if (count & 0x1)
+ count++;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, addr + NE_CMD);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(0x00, addr + NE_EN0_RSARLO);
+ ei_outb(start_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RWRITE + E8390_START, addr + NE_CMD);
+
+ ei_outsw(addr + NE_DATAPORT, buf, count >> 1);
+
+ dma_start = jiffies;
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
+ netdev_err(dev, "timeout waiting for Tx RDC\n");
+ mcf8390_reset_8390(dev);
+ __NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static const struct net_device_ops mcf8390_netdev_ops = {
+ .ndo_open = __ei_open,
+ .ndo_stop = __ei_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int mcf8390_init(struct net_device *dev)
+{
+ static u32 offsets[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ };
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char SA_prom[32];
+ u32 addr = dev->base_addr;
+ int start_page, stop_page;
+ int i, ret;
+
+ mcf8390_reset_8390(dev);
+
+ /*
+ * Read the 16 bytes of station address PROM.
+ * We must first initialize registers,
+ * similar to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
+ {
+ static const struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
+ /* Select page 0 */
+ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD + E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
+ ei_outb(program_seq[i].value,
+ addr + program_seq[i].offset);
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = ei_inb(addr + NE_DATAPORT);
+ ei_inb(addr + NE_DATAPORT);
+ }
+
+ /* We must set the 8390 for word mode. */
+ ei_outb(0x49, addr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
+
+ ei_local->name = "mcf8390";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = 1;
+ ei_local->rx_start_page = start_page + TX_PAGES;
+ ei_local->reset_8390 = mcf8390_reset_8390;
+ ei_local->block_input = mcf8390_block_input;
+ ei_local->block_output = mcf8390_block_output;
+ ei_local->get_8390_hdr = mcf8390_get_8390_hdr;
+ ei_local->reg_offset = offsets;
+
+ dev->netdev_ops = &mcf8390_netdev_ops;
+ __NS8390_init(dev, 0);
+ ret = register_netdev(dev);
+ if (ret) {
+ free_irq(dev->irq, dev);
+ return ret;
+ }
+
+ netdev_info(dev, "addr=0x%08x irq=%d, Ethernet Address %pM\n",
+ addr, dev->irq, dev->dev_addr);
+ return 0;
+}
+
+static int mcf8390_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ei_device *ei_local;
+ struct resource *mem, *irq;
+ resource_size_t msize;
+ int ret;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq == NULL) {
+ dev_err(&pdev->dev, "no IRQ specified?\n");
+ return -ENXIO;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "no memory address specified?\n");
+ return -ENXIO;
+ }
+ msize = resource_size(mem);
+ if (!request_mem_region(mem->start, msize, pdev->name))
+ return -EBUSY;
+
+ dev = ____alloc_ei_netdev(0);
+ if (dev == NULL) {
+ release_mem_region(mem->start, msize);
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ ei_local = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ dev->base_addr = mem->start;
+
+ ret = mcf8390_init(dev);
+ if (ret) {
+ release_mem_region(mem->start, msize);
+ free_netdev(dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int mcf8390_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ unregister_netdev(dev);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver mcf8390_drv = {
+ .driver = {
+ .name = "mcf8390",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcf8390_probe,
+ .remove = mcf8390_remove,
+};
+
+module_platform_driver(mcf8390_drv);
+
+MODULE_DESCRIPTION("MCF8390 ColdFire NS8390 driver");
+MODULE_AUTHOR("Greg Ungerer <gerg@uclinux.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mcf8390");
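The block I/O routines in the new driver above follow the classic NS8390 remote-DMA recipe: program the byte count into RCNTLO/HI and the ring address into RSARLO/HI, issue a remote read or write, stream the data through the single 16-bit dataport, then ack ENISR_RDC. A condensed user-space sketch of the read side; the register and command values match the kernel's 8390.h, but the I/O stubs below are placeholders standing in for the driver's accessors, not real functions:

#include <stdint.h>

#define NE_CMD        0x00
#define NE_DATAPORT   0x10
#define NE_EN0_ISR    0x07
#define NE_EN0_RSARLO 0x08
#define NE_EN0_RSARHI 0x09
#define NE_EN0_RCNTLO 0x0a
#define NE_EN0_RCNTHI 0x0b

#define E8390_START   0x02	/* values as in the kernel's 8390.h */
#define E8390_RREAD   0x08
#define ENISR_RDC     0x40

/* Stubs standing in for the driver's ei_outb()/ei_insw() accessors. */
static void ei_outb(uint8_t val, uint32_t addr) { (void)val; (void)addr; }
static uint16_t ei_inw(uint32_t addr) { (void)addr; return 0; }

static void ne_remote_read(uint32_t base, uint16_t *buf, uint16_t src, int count)
{
	int i;

	/* Program the transfer length and the source address in the ring... */
	ei_outb(count & 0xff, base + NE_EN0_RCNTLO);
	ei_outb(count >> 8,   base + NE_EN0_RCNTHI);
	ei_outb(src & 0xff,   base + NE_EN0_RSARLO);
	ei_outb(src >> 8,     base + NE_EN0_RSARHI);

	/* ...start the remote read... */
	ei_outb(E8390_RREAD + E8390_START, base + NE_CMD);

	/* ...drain the single dataport window a word at a time... */
	for (i = 0; i < count >> 1; i++)
		buf[i] = ei_inw(base + NE_DATAPORT);

	/* ...and ack remote-DMA-complete, as both block routines above do. */
	ei_outb(ENISR_RDC, base + NE_EN0_ISR);
}

int main(void)
{
	uint16_t pkt[32];

	ne_remote_read(0, pkt, 0x4000, sizeof(pkt));
	return 0;
}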
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 34850117808..9c77c736f17 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1014,7 +1014,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
struct greth_regs *regs;
greth = netdev_priv(dev);
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1036,7 +1036,7 @@ static void greth_set_hash_filter(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
u32 mc_filter[2];
unsigned int bitnr;
@@ -1055,7 +1055,7 @@ static void greth_set_multicast_list(struct net_device *dev)
{
int cfg;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
cfg = GRETH_REGLOAD(regs->control);
if (dev->flags & IFF_PROMISC)
@@ -1414,7 +1414,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
goto error1;
}
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 75299f500ee..7203b522f23 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -623,7 +623,7 @@ static int lance_rx(struct net_device *dev)
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
- (char *)lp->rx_buf_ptr_cpu[entry], len);
+ lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -919,7 +919,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
- cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index a6e2e840884..5c728436b85 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -873,10 +873,9 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
lp->rx_skbuff[i] = skb;
- if (skb) {
- skb->dev = dev;
+ if (skb)
rx_buff = skb->data;
- } else
+ else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
if (rx_buff == NULL)
lp->rx_ring[i].base = 0;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index ab7ff8645ab..a92ddee7f66 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -228,7 +228,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
* bits are reversed.
*/
- addr = (void *)MACE_PROM;
+ addr = MACE_PROM;
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index ff9c73859d4..21e261ffbe1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -199,7 +199,7 @@ int atl1c_read_mac_addr(struct atl1c_hw *hw)
err = atl1c_get_permanent_address(hw);
if (err)
- random_ether_addr(hw->perm_mac_addr);
+ eth_random_addr(hw->perm_mac_addr);
memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
return err;
@@ -602,7 +602,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
int atl1c_phy_init(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
@@ -696,7 +696,7 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
/* select one link mode to get lower power consumption */
int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret = 0;
u16 autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -768,7 +768,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 master_ctrl, mac_ctrl, phy_ctrl;
u32 wol_ctrl, speed;
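The first hunk above swaps random_ether_addr() for its renamed successor eth_random_addr(), used as a fallback when the permanent MAC cannot be read. That helper fills the buffer with random bytes and then forces the result to be a valid unicast, locally-administered address. A user-space sketch of that guarantee; rand() stands in for the kernel's entropy source:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void random_local_mac(uint8_t addr[6])
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = rand() & 0xff;	/* the kernel uses real entropy here */
	addr[0] &= 0xfe;	/* clear the multicast bit */
	addr[0] |= 0x02;	/* set the locally-administered bit */
}

int main(void)
{
	uint8_t mac[6];

	random_local_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}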
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 17d935bdde0..21d8c4dbdbe 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -74,6 +74,8 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
#define L2CB_V10 0xc0
#define L2CB_V11 0xc1
+#define L2CB_V20 0xc0
+#define L2CB_V21 0xc1
/* register definition */
#define REG_DEVICE_CAP 0x5C
@@ -87,6 +89,9 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define LINK_CTRL_L1_EN 0x02
#define LINK_CTRL_EXT_SYNC 0x80
+#define REG_PCIE_IND_ACC_ADDR 0x80
+#define REG_PCIE_IND_ACC_DATA 0x84
+
#define REG_DEV_SERIALNUM_CTRL 0x200
#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
#define REG_DEV_MAC_SEL_SHIFT 0
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1f78b63d5ef..1bf5bbfe778 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
msleep(5);
}
-/*
+/**
* atl1c_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
* atl1c_irq_reset - reset interrupt configuration on the NIC
* @adapter: board private structure
*/
@@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
return data;
}
-/*
+/**
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -360,7 +360,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
}
-/*
+/**
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -373,7 +373,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->common_task);
}
-/*
+/**
* atl1c_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -452,7 +452,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1c_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -517,7 +517,7 @@ static int atl1c_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -576,12 +576,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
atl1c_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1c_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -632,12 +626,6 @@ out:
return retval;
}
-/*
- * atl1c_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -650,7 +638,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl1c_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -739,6 +727,8 @@ static const struct atl1c_platform_patch plats[] __devinitdata = {
static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
{
+ struct pci_dev *pdev = hw->adapter->pdev;
+ u32 misc_ctrl;
int i = 0;
hw->msi_lnkpatch = false;
@@ -753,8 +743,20 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
}
i++;
}
+
+ if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
+ hw->revision_id == L2CB_V21) {
+ /* config access mode */
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
+ REG_PCIE_DEV_MISC_CTRL);
+ pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
+ misc_ctrl &= ~0x100;
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
+ REG_PCIE_DEV_MISC_CTRL);
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
+ }
}
-/*
+/**
* atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
* @adapter: board private structure to initialize
*
@@ -780,7 +782,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
hw->revision_id = revision & 0xFF;
/* before link up, we assume hibernate is true */
hw->hibernate = true;
@@ -852,7 +854,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
}
-/*
+/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
@@ -877,7 +879,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
tpd_ring->next_to_use = 0;
}
-/*
+/**
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
@@ -930,7 +932,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -953,7 +955,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -988,12 +990,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
}
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
rfd_ring->buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += rfd_ring->count;
rx_desc_count += rfd_ring->count;
@@ -1226,7 +1228,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
*/
static int atl1c_reset_mac(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 ctrl_data = 0;
@@ -1362,7 +1364,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
return;
}
-/*
+/**
* atl1c_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1476,7 +1478,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1530,8 +1532,7 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1558,11 +1559,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
return true;
}
-/*
+/**
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1c_intr(int irq, void *data)
{
@@ -1813,9 +1813,8 @@ rrs_checked:
atl1c_alloc_rx_buffer(adapter);
}
-/*
+/**
* atl1c_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
@@ -2270,7 +2269,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
atl1c_reset_dma_ring(adapter);
}
-/*
+/**
* atl1c_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2309,7 +2308,7 @@ err_up:
return err;
}
-/*
+/**
* atl1c_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2432,7 +2431,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1c_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1c_pci_tbl
@@ -2579,7 +2578,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1c_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2605,7 +2604,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-/*
+/**
* atl1c_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2633,7 +2632,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1c_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2661,7 +2660,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1c_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2704,7 +2703,7 @@ static struct pci_driver atl1c_driver = {
.driver.pm = &atl1c_pm_ops,
};
-/*
+/**
* atl1c_init_module - Driver Registration Routine
*
* atl1c_init_module is the first routine called when the driver is
@@ -2715,7 +2714,7 @@ static int __init atl1c_init_module(void)
return pci_register_driver(&atl1c_driver);
}
-/*
+/**
* atl1c_exit_module - Driver Exit Cleanup Routine
*
* atl1c_exit_module is called just before the driver is removed
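The atl1c_patch_assign() hunk earlier in this file adds a quirk for L2CB v2.1 that reaches an internal register through an indirect window in PCI config space: one dword (REG_PCIE_IND_ACC_ADDR, 0x80) selects the target register, the next (REG_PCIE_IND_ACC_DATA, 0x84) exposes its value. A minimal sketch of that address/data access pattern; the flat-array config stubs and the REG_PCIE_DEV_MISC_CTRL offset are assumptions for illustration, since the diff does not show its value:

#include <stdint.h>

#define REG_PCIE_IND_ACC_ADDR	0x80
#define REG_PCIE_IND_ACC_DATA	0x84
#define REG_PCIE_DEV_MISC_CTRL	0x21c	/* hypothetical offset, not shown in the diff */

/* Stubs standing in for pci_write_config_dword()/pci_read_config_dword();
 * they just model config space as a flat array so the sketch is self-contained. */
static uint32_t cfg[0x1000 / 4];
static void cfg_write(uint32_t off, uint32_t val) { cfg[off / 4] = val; }
static uint32_t cfg_read(uint32_t off) { return cfg[off / 4]; }

/* Read-modify-write one internal register through the window, mirroring the
 * quirk's sequence: select, read, re-select, write back with the bit cleared. */
static void ind_clear_bits(uint32_t reg, uint32_t mask)
{
	uint32_t val;

	cfg_write(REG_PCIE_IND_ACC_ADDR, reg);
	val = cfg_read(REG_PCIE_IND_ACC_DATA);
	cfg_write(REG_PCIE_IND_ACC_ADDR, reg);
	cfg_write(REG_PCIE_IND_ACC_DATA, val & ~mask);
}

int main(void)
{
	ind_clear_bits(REG_PCIE_DEV_MISC_CTRL, 0x100);
	return 0;
}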
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6e61f9f9ebb..82b23861bf5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -268,7 +268,7 @@ static int atl1e_set_eeprom(struct net_device *netdev,
if (eeprom_buff == NULL)
return -ENOMEM;
- ptr = (u32 *)eeprom_buff;
+ ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1220e511ced..a98acc8a956 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
};
-/*
+/**
* atl1e_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
* atl1e_irq_reset - reset interrupt configuration on the NIC
* @adapter: board private structure
*/
@@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
AT_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_link_chg_task - deal with link change event out of interrupt context
* @netdev: network interface device structure
*/
@@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
cancel_work_sync(&adapter->link_chg_task);
}
-/*
+/**
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1e_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1e_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}
-/*
- * atl1e_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -505,12 +499,6 @@ out:
}
-/*
- * atl1e_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
msleep(1);
}
-/*
+/**
* atl1e_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -551,7 +539,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
* @adapter: board private structure to initialize
*
@@ -635,14 +623,13 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
@@ -679,14 +666,14 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
ring_count);
}
-/*
+/**
* atl1e_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
+ &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
u16 i, j;
@@ -762,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -787,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -884,14 +871,12 @@ failed:
return err;
}
-static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
- struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
- struct atl1e_tx_ring *tx_ring =
- (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_rx_page_desc *rx_page_desc = NULL;
int i, j;
@@ -932,7 +917,7 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 dev_ctrl_data = 0;
u32 max_pay_load = 0;
u32 jumbo_thresh = 0;
@@ -975,7 +960,7 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 rxf_len = 0;
u32 rxf_low = 0;
u32 rxf_high = 0;
@@ -1078,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
-/*
+/**
* atl1e_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1148,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1224,8 +1209,7 @@ static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
@@ -1261,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
return true;
}
-/*
+/**
* atl1e_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1e_intr(int irq, void *data)
{
@@ -1384,15 +1367,14 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
u8 rx_using = rx_page_desc[que].rx_using;
- return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+ return &(rx_page_desc[que].rx_page[rx_using]);
}
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
struct net_device *netdev = adapter->netdev;
- struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
- &adapter->rx_ring;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc =
(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
struct sk_buff *skb = NULL;
@@ -1494,9 +1476,8 @@ fatal_err:
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1e_clean(struct napi_struct *napi, int budget)
{
@@ -1576,7 +1557,7 @@ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
tx_ring->next_to_use = 0;
memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
- return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+ return &tx_ring->desc[next_to_use];
}
static struct atl1e_tx_buffer *
@@ -1961,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
}
-/*
+/**
* atl1e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2007,7 +1988,7 @@ err_req_irq:
return err;
}
-/*
+/**
* atl1e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2061,8 +2042,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc) {
/* get link status */
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
mii_advertise_data = ADVERTISE_10HALF;
@@ -2086,7 +2067,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
msleep(100);
atl1e_read_phy_reg(hw, MII_BMSR,
- (u16 *)&mii_bmsr_data);
+ &mii_bmsr_data);
if (mii_bmsr_data & BMSR_LSTATUS)
break;
}
@@ -2243,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1e_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1e_pci_tbl
@@ -2397,7 +2378,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1e_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2429,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/*
+/**
* atl1e_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2457,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1e_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2484,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1e_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2528,7 +2509,7 @@ static struct pci_driver atl1e_driver = {
.err_handler = &atl1e_err_handler
};
-/*
+/**
* atl1e_init_module - Driver Registration Routine
*
* atl1e_init_module is the first routine called when the driver is
@@ -2539,7 +2520,7 @@ static int __init atl1e_init_module(void)
return pci_register_driver(&atl1e_driver);
}
-/*
+/**
* atl1e_exit_module - Driver Exit Cleanup Routine
*
* atl1e_exit_module is called just before the driver is removed
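Most of the atl1c/atl1e hunks above only flip the comment opener from /* to /**: the double star is what marks a comment as kernel-doc so scripts/kernel-doc extracts it, and it is also why the headers with empty @netdev:/@ifreq:/@cmd: lines were deleted rather than promoted. A minimal sketch of a well-formed block; the function and struct names are illustrative, not from the diff:

struct example_adapter { int dummy; };

/**
 * example_clean - NAPI Rx polling callback (illustrative name only)
 * @adapter: board private structure
 * @budget: maximum number of packets to process in this call
 *
 * The double-star opener marks this comment for scripts/kernel-doc;
 * every parameter gets an @name line, and a free-form body may follow.
 *
 * Return: number of packets processed.
 */
static int example_clean(struct example_adapter *adapter, int budget)
{
	(void)adapter;
	(void)budget;
	return 0;
}

int main(void)
{
	struct example_adapter a = { 0 };

	return example_clean(&a, 64);
}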
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6e7ef..b5086f1e637 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
return -1;
}
-/*
+/**
* atl1e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5d10884e508..7bae2ad7a7c 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
return -1;
}
-/*
+/**
* atl1_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
@@ -538,7 +538,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
u16 i;
if (atl1_get_permanent_address(hw)) {
- random_ether_addr(hw->perm_mac_addr);
+ eth_random_addr(hw->perm_mac_addr);
ret = 1;
}
@@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}
-/*
+/**
* atl1_sw_init - Initialize general software structures (struct atl1_adapter)
* @adapter: board private structure to initialize
*
@@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return retval;
}
-/*
+/**
* atl1_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -1061,7 +1055,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
rfd_ring->buffer_info =
- (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+ (tpd_ring->buffer_info + tpd_ring->count);
/*
* real ring DMA buffer
@@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_rx_ring - Free RFD Buffers
* @adapter: board private structure
*/
@@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_tx_ring - Free Tx Buffers
* @adapter: board private structure
*/
@@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
atomic_set(&tpd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
-/*
+/**
* atl1_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
}
}
-/*
+/**
* atl1_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
*/
@@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
return 1;
}
-/*
+/**
* atl1_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1_intr(int irq, void *data)
{
@@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
}
-/*
+/**
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work)
netif_device_attach(netdev);
}
-/*
+/**
* atl1_change_mtu - Change the Maximum Transmission Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl1_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2762,7 +2755,7 @@ err_up:
return err;
}
-/*
+/**
* atl1_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = {
#endif
};
-/*
+/**
* atl1_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1_pci_tbl
@@ -3111,7 +3104,7 @@ err_request_regions:
return err;
}
-/*
+/**
* atl1_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = {
.driver.pm = ATL1_PM_OPS,
};
-/*
+/**
* atl1_exit_module - Driver Exit Cleanup Routine
*
* atl1_exit_module is called just before the driver is removed
@@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void)
pci_unregister_driver(&atl1_driver);
}
-/*
+/**
* atl1_init_module - Driver Registration Routine
*
* atl1_init_module is the first routine called when the driver is
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc406b2..57d64b80fd7 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev);
static void atl2_check_options(struct atl2_adapter *adapter);
-/*
+/**
* atl2_sw_init - Initialize general software structures (struct atl2_adapter)
* @adapter: board private structure to initialize
*
@@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter)
adapter->txs_next_clear = 0;
}
-/*
+/**
* atl2_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
return value;
}
-/*
+/**
* atl2_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter)
ATL2_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl2_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
spin_unlock(&adapter->stats_lock);
}
-/*
+/**
* atl2_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl2_intr(int irq, void *data)
{
@@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
netdev);
}
-/*
+/**
* atl2_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter)
adapter->ring_dma);
}
-/*
+/**
* atl2_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter)
#endif
}
-/*
+/**
* atl2_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/*
+/**
* atl2_change_mtu - Change the Maximum Transmission Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl2_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/*
- * atl2_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return 0;
}
-/*
- * atl2_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data)
}
}
-/*
+/**
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_link_chg_task - deal with a link change event out of interrupt context
- * @netdev: network interface device structure
*/
static void atl2_link_chg_task(struct work_struct *work)
{
@@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = {
#endif
};
-/*
+/**
* atl2_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl2_pci_tbl
@@ -1501,7 +1487,7 @@ err_dma:
return err;
}
-/*
+/**
* atl2_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = {
.shutdown = atl2_shutdown,
};
-/*
+/**
* atl2_init_module - Driver Registration Routine
*
* atl2_init_module is the first routine called when the driver is
@@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void)
}
module_init(atl2_init_module);
-/*
+/**
* atl2_exit_module - Driver Exit Cleanup Routine
*
* atl2_exit_module is called just before the driver is removed
@@ -2360,7 +2346,7 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw)
{
if (get_permanent_address(hw)) {
/* for test */
- /* FIXME: shouldn't we use random_ether_addr() here? */
+ /* FIXME: shouldn't we use eth_random_addr() here? */
hw->perm_mac_addr[0] = 0x00;
hw->perm_mac_addr[1] = 0x13;
hw->perm_mac_addr[2] = 0x74;
@@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
return -1;
}
-/*
+/**
* atl2_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index b4f3aa49a7f..77ffbc4a507 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atlx_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter)
schedule_work(&adapter->link_chg_task);
}
-/*
+/**
* atlx_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter,
ioread32(adapter->hw.hw_addr + REG_IMR);
}
-/*
+/**
* atlx_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
adapter->int_enabled = true;
}
-/*
+/**
* atlx_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/*
+/**
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
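All of the /* to /** conversions across these files promote existing comment headers to kernel-doc, which scripts/kernel-doc only recognizes when the block opens with the two-asterisk form. A sketch of the shape these headers are being promoted to (example_tx_timeout is a hypothetical name, not part of this patch):

#include <linux/netdevice.h>

/**
 * example_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * The two-asterisk opener marks the block for scripts/kernel-doc; the
 * first line names the function, each @line documents one parameter,
 * and the rest is free-form description.
 */
static void example_tx_timeout(struct net_device *netdev)
{
	/* body elided - the header format is the point here */
}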
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index d09c6b583d1..9786c0e9890 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -483,9 +483,11 @@ out:
static void b44_stats_update(struct b44 *bp)
{
unsigned long reg;
- u32 *val;
+ u64 *val;
val = &bp->hw_stats.tx_good_octets;
+ u64_stats_update_begin(&bp->hw_stats.syncp);
+
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
@@ -496,6 +498,8 @@ static void b44_stats_update(struct b44 *bp)
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
+
+ u64_stats_update_end(&bp->hw_stats.syncp);
}
static void b44_link_report(struct b44 *bp)
@@ -1635,44 +1639,49 @@ static int b44_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *b44_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct b44 *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &dev->stats;
struct b44_hw_stats *hwstat = &bp->hw_stats;
-
- /* Convert HW stats into netdevice stats. */
- nstat->rx_packets = hwstat->rx_pkts;
- nstat->tx_packets = hwstat->tx_pkts;
- nstat->rx_bytes = hwstat->rx_octets;
- nstat->tx_bytes = hwstat->tx_octets;
- nstat->tx_errors = (hwstat->tx_jabber_pkts +
- hwstat->tx_oversize_pkts +
- hwstat->tx_underruns +
- hwstat->tx_excessive_cols +
- hwstat->tx_late_cols);
- nstat->multicast = hwstat->tx_multicast_pkts;
- nstat->collisions = hwstat->tx_total_cols;
-
- nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
- hwstat->rx_undersize);
- nstat->rx_over_errors = hwstat->rx_missed_pkts;
- nstat->rx_frame_errors = hwstat->rx_align_errs;
- nstat->rx_crc_errors = hwstat->rx_crc_errs;
- nstat->rx_errors = (hwstat->rx_jabber_pkts +
- hwstat->rx_oversize_pkts +
- hwstat->rx_missed_pkts +
- hwstat->rx_crc_align_errs +
- hwstat->rx_undersize +
- hwstat->rx_crc_errs +
- hwstat->rx_align_errs +
- hwstat->rx_symbol_errs);
-
- nstat->tx_aborted_errors = hwstat->tx_underruns;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+
+ /* Convert HW stats into rtnl_link_stats64 stats. */
+ nstat->rx_packets = hwstat->rx_pkts;
+ nstat->tx_packets = hwstat->tx_pkts;
+ nstat->rx_bytes = hwstat->rx_octets;
+ nstat->tx_bytes = hwstat->tx_octets;
+ nstat->tx_errors = (hwstat->tx_jabber_pkts +
+ hwstat->tx_oversize_pkts +
+ hwstat->tx_underruns +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_late_cols);
+ nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->collisions = hwstat->tx_total_cols;
+
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_undersize);
+ nstat->rx_over_errors = hwstat->rx_missed_pkts;
+ nstat->rx_frame_errors = hwstat->rx_align_errs;
+ nstat->rx_crc_errors = hwstat->rx_crc_errs;
+ nstat->rx_errors = (hwstat->rx_jabber_pkts +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_missed_pkts +
+ hwstat->rx_crc_align_errs +
+ hwstat->rx_undersize +
+ hwstat->rx_crc_errs +
+ hwstat->rx_align_errs +
+ hwstat->rx_symbol_errs);
+
+ nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
- /* Carrier lost counter seems to be broken for some devices */
- nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+ /* Carrier lost counter seems to be broken for some devices */
+ nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
return nstat;
}
@@ -1993,17 +2002,24 @@ static void b44_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct b44 *bp = netdev_priv(dev);
- u32 *val = &bp->hw_stats.tx_good_octets;
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
u32 i;
spin_lock_irq(&bp->lock);
-
b44_stats_update(bp);
+ spin_unlock_irq(&bp->lock);
- for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
- *data++ = *val++;
+ do {
+ data_src = &hwstat->tx_good_octets;
+ data_dst = data;
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
- spin_unlock_irq(&bp->lock);
+ for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+ *data_dst++ = *data_src++;
+
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2113,7 +2129,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_open = b44_open,
.ndo_stop = b44_close,
.ndo_start_xmit = b44_start_xmit,
- .ndo_get_stats = b44_get_stats,
+ .ndo_get_stats64 = b44_get_stats64,
.ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index e1905a49279..8993d72f042 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -338,9 +338,10 @@ struct ring_info {
* the layout
*/
struct b44_hw_stats {
-#define _B44(x) u32 x;
+#define _B44(x) u64 x;
B44_STAT_REG_DECLARE
#undef _B44
+ struct u64_stats_sync syncp;
};
struct ssb_device;
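Two things change in this struct: the _B44() X-macro now declares every hardware counter as u64, and the new syncp member is a u64_stats_sync, a sequence count that lets a 32-bit reader detect a torn 64-bit update and retry (on 64-bit kernels the helpers compile away). A self-contained sketch of the writer/reader pairing the b44.c changes above rely on, assuming a single writer at a time (b44 guarantees this with bp->lock); example_stats is a hypothetical structure:

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 rx_bytes;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: must be serialized externally (b44 holds bp->lock). */
static void example_stats_add(struct example_stats *s, u64 rx, u64 tx)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_bytes += rx;
	s->tx_bytes += tx;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: loop until a snapshot is read without a concurrent
 * update; the _bh variants match the ones used in b44_get_stats64(). */
static void example_stats_read(struct example_stats *s, u64 *rx, u64 *tx)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*rx = s->rx_bytes;
		*tx = s->tx_bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}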
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 1fa4927a45b..79cebd8525c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
@@ -57,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.1"
-#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define DRV_MODULE_VERSION "2.2.3"
+#define DRV_MODULE_RELDATE "June 27, 2012"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (void *) (status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
pr_cont(" condition[%08x]\n",
bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
DP_SHMEM_LINE(bp, 0x3cc);
DP_SHMEM_LINE(bp, 0x3dc);
DP_SHMEM_LINE(bp, 0x3ec);
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
- int cpus = num_online_cpus();
+ int cpus = netif_get_num_default_rss_queues();
int msix_vecs;
if (!bp->num_req_rx_rings)
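netif_get_num_default_rss_queues() replaces num_online_cpus() here so that hosts with many cores do not claim one RX ring per CPU by default; at the time of this series the helper evaluates to min(8, num_online_cpus()). A sketch of the sizing idiom (example_pick_rx_rings is hypothetical):

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Clamp the kernel's suggested default against the NIC's hard limit,
 * as bnx2_setup_int_mode() does before carving up MSI-X vectors. */
static int example_pick_rx_rings(int hw_max_rings)
{
	return min(netif_get_num_default_rss_queues(), hw_max_rings);
}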
@@ -6383,6 +6388,7 @@ bnx2_reset_task(struct work_struct *work)
{
struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
int rc;
+ u16 pcicmd;
rtnl_lock();
if (!netif_running(bp->dev)) {
@@ -6392,6 +6398,12 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp, true);
+ pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
+ if (!(pcicmd & PCI_COMMAND_MEMORY)) {
+ /* in case PCI block has reset */
+ pci_restore_state(bp->pdev);
+ pci_save_state(bp->pdev);
+ }
rc = bnx2_init_nic(bp, 1);
if (rc) {
netdev_err(bp->dev, "failed to reset NIC, closing\n");
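The PCI_COMMAND_MEMORY test above works because a reset of the underlying PCI block clears the command register: if memory-space decoding is off, config space was lost, so it is rewritten from the copy captured by pci_save_state() at probe time. The immediate re-save matters because pci_restore_state() consumes the saved copy. A condensed sketch of the idiom (example_pci_recover is hypothetical):

#include <linux/pci.h>

/* Assumes pci_save_state() was called once at probe so a valid saved
 * copy of config space exists. */
static void example_pci_recover(struct pci_dev *pdev)
{
	u16 pcicmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		pci_restore_state(pdev);	/* rewrite config space */
		pci_save_state(pdev);		/* re-arm: restore consumed it */
	}
}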
@@ -6406,6 +6418,75 @@ bnx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
+
+static void
+bnx2_dump_ftq(struct bnx2 *bp)
+{
+ int i;
+ u32 reg, bdidx, cid, valid;
+ struct net_device *dev = bp->dev;
+ static const struct ftq_reg {
+ char *name;
+ u32 off;
+ } ftq_arr[] = {
+ BNX2_FTQ_ENTRY(RV2P_P),
+ BNX2_FTQ_ENTRY(RV2P_T),
+ BNX2_FTQ_ENTRY(RV2P_M),
+ BNX2_FTQ_ENTRY(TBDR_),
+ BNX2_FTQ_ENTRY(TDMA_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TPAT_),
+ BNX2_FTQ_ENTRY(RXP_C),
+ BNX2_FTQ_ENTRY(RXP_),
+ BNX2_FTQ_ENTRY(COM_COMXQ_),
+ BNX2_FTQ_ENTRY(COM_COMTQ_),
+ BNX2_FTQ_ENTRY(COM_COMQ_),
+ BNX2_FTQ_ENTRY(CP_CPQ_),
+ };
+
+ netdev_err(dev, "<--- start FTQ dump --->\n");
+ for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
+ netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
+ bnx2_reg_rd_ind(bp, ftq_arr[i].off));
+
+ netdev_err(dev, "CPU states:\n");
+ for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
+ netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
+ reg, bnx2_reg_rd_ind(bp, reg),
+ bnx2_reg_rd_ind(bp, reg + 4),
+ bnx2_reg_rd_ind(bp, reg + 8),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x20));
+
+ netdev_err(dev, "<--- end FTQ dump --->\n");
+ netdev_err(dev, "<--- start TBDC dump --->\n");
+ netdev_err(dev, "TBDC free cnt: %ld\n",
+ REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+ netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
+ for (i = 0; i < 0x20; i++) {
+ int j = 0;
+
+ REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
+ REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
+ BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+ REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+ while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
+ BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
+ j++;
+
+ cid = REG_RD(bp, BNX2_TBDC_CID);
+ bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
+ valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
+ netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
+ i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
+ bdidx >> 24, (valid >> 8) & 0x0ff);
+ }
+ netdev_err(dev, "<--- end TBDC dump --->\n");
+}
+
static void
bnx2_dump_state(struct bnx2 *bp)
{
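BNX2_FTQ_ENTRY() combines token pasting with __stringify() (hence the new <linux/stringify.h> include above) so each table row carries a printable register name next to its indirect-read offset. For example:

/* BNX2_FTQ_ENTRY(TXP_) expands to:
 *
 *	{ "TXP_FTQ_CTL", BNX2_TXP_FTQ_CTL }
 *
 * Note also that the CPU-state loop reads offset 0x1c (the program
 * counter) twice per line - hence "pc %x pc %x" - presumably so the
 * log shows whether each on-chip CPU is actually advancing.
 */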
@@ -6435,6 +6516,7 @@ bnx2_tx_timeout(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
+ bnx2_dump_ftq(bp);
bnx2_dump_state(bp);
bnx2_dump_mcp_state(bp);
@@ -6628,6 +6710,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
+ netif_tx_disable(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
@@ -7832,7 +7915,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
else
strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
- if (PCI_FUNC(bp->pdev->devfn) == 0) {
+ if (bp->func == 0) {
switch (strap) {
case 0x4:
case 0x5:
@@ -8131,9 +8214,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+ if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
+ bp->func = 1;
+
if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
BNX2_SHM_HDR_SIGNATURE_SIG) {
- u32 off = PCI_FUNC(pdev->devfn) << 2;
+ u32 off = bp->func << 2;
bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
} else
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda73be..af6451dec29 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+/*
+ * tbdc definition
+ * offset: 0x5400
+ */
+#define BNX2_TBDC_COMMAND 0x5400
+#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
+#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
+#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
+#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
+#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
+
+#define BNX2_TBDC_STATUS 0x5404
+#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
+
+#define BNX2_TBDC_BD_ADDR 0x5424
+
+#define BNX2_TBDC_BIDX 0x542c
+#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
+#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
+
+#define BNX2_TBDC_CID 0x5430
+
+#define BNX2_TBDC_CAM_OPCODE 0x5434
+#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
+#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
+#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
+
/*
* tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
int irq_nvecs;
+ u8 func;
+
u8 num_tx_rings;
u8 num_rx_rings;
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
(msg))
+#define BNX2_BC_RESET_TYPE 0x000001c0
+
#define BNX2_BC_STATE 0x000001c4
#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
#define BNX2_BC_STATE_SIGN 0x42530000
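BNX2_TBDC_BIDX packs two fields that bnx2_dump_ftq() unpacks with a mask and a shift; hypothetical helpers making the layout explicit:

/* Bits 15:0 of BNX2_TBDC_BIDX hold the buffer-descriptor index,
 * bits 31:24 the command (see BNX2_TBDC_BDIDX_BDIDX/_CMD above). */
static inline u16 example_tbdc_bdidx(u32 bidx)
{
	return bidx & BNX2_TBDC_BDIDX_BDIDX;
}

static inline u8 example_tbdc_cmd(u32 bidx)
{
	return (bidx & BNX2_TBDC_BDIDX_CMD) >> 24;
}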
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7de82418497..77bcd4cb4ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.50-0"
-#define DRV_MODULE_RELDATE "2012/04/23"
+#define DRV_MODULE_VERSION "1.72.51-0"
+#define DRV_MODULE_RELDATE "2012/06/18"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID 48
-enum {
+#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+ (bp)->max_cos)
/* iSCSI L2 */
- BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
- BNX2X_FCOE_ETH_CID,
-};
+#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
/** Additional rings budgeting */
#ifdef BCM_CNIC
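The CNIC L2 CIDs now come right after the CIDs used by the regular ETH connections instead of being pinned at 48. A worked example under assumed values:

/* Assumed configuration: 8 non-CNIC queues, max_cos = 3.
 *
 *	BNX2X_CNIC_START_ETH_CID(bp) = 8 * 3 = 24
 *	BNX2X_ISCSI_ETH_CID(bp)      = 24
 *	BNX2X_FCOE_ETH_CID(bp)       = 25
 *
 * Previously iSCSI/FCoE sat at fixed CIDs 48/49 regardless of how many
 * ETH connections were really in use.
 */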
@@ -276,29 +276,30 @@ enum {
#define FIRST_TX_ONLY_COS_INDEX 1
#define FIRST_TX_COS_INDEX 0
-/* defines for decodeing the fastpath index and the cos index out of the
- * transmission queue index
- */
-#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
-
-#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
-#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
-
/* rules for calculating the cids of tx-only connections */
-#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
-#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+ (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
/* fp index inside class of service range */
-#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
-
-/*
- * 0..15 eth cos0
- * 16..31 eth cos1 if applicable
- * 32..47 eth cos2 If applicable
- * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+ ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indexes for transmission queues array:
+ * txdata for RSS queue i, CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if present) is at location max cos * num of RSS
+ * txdata for FWD (if present) is one location after FCoE
+ * txdata for OOO (if present) is one location after FWD
*/
-#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
-#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+enum {
+ FCOE_TXQ_IDX_OFFSET,
+ FWD_TXQ_IDX_OFFSET,
+ OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#ifdef BCM_CNIC
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+#endif
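A worked example of the txdata slot layout described in the comment above, under assumed values:

/* Assumed configuration: 4 RSS queues, max_cos = 3.
 *
 *	txdata for RSS 2, CoS 0  ->  2 + (0 * 4) = slot 2
 *	txdata for RSS 2, CoS 1  ->  2 + (1 * 4) = slot 6
 *	MAX_ETH_TXQ_IDX(bp)      =   4 * 3 = 12
 *	FCOE_TXQ_IDX(bp)         =   12 + FCOE_TXQ_IDX_OFFSET = 12
 *
 * with FWD at slot 13 and OOO at slot 14 when those rings exist.
 */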
/* fast path */
/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ bool l4_rxhash;
u16 gro_size;
u16 full_page;
};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
__le16 *tx_cons_sb;
int txq_index;
+ struct bnx2x_fastpath *parent_fp;
+ int tx_ring_size;
};
enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
enum bnx2x_tpa_mode_t mode;
u8 max_cos; /* actual number of active tx coses */
- struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+ struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
rx_calls;
/* TPA related */
- struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ struct bnx2x_agg_info *tpa_info;
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
-
- struct tstorm_per_queue_stats old_tclient;
- struct ustorm_per_queue_stats old_uclient;
- struct xstorm_per_queue_stats old_xclient;
- struct bnx2x_eth_q_stats eth_q_stats;
- struct bnx2x_eth_q_stats_old eth_q_stats_old;
-
/* The size is calculated using the following:
sizeof name field from netdev structure +
4 ('-Xx-' string) +
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
-
- /* MACs object */
- struct bnx2x_vlan_mac_obj mac_obj;
-
- /* Queue State object */
- struct bnx2x_queue_sp_obj q_obj;
-
};
-#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
-/* FCoE L2 `fastpath' entry is right after the eth entries */
-#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
-#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
-#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
-#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
- txdata[FIRST_TX_COS_INDEX].var)
+#define FCOE_IDX_OFFSET 0
+
+#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+ FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata_ptr[FIRST_TX_COS_INDEX] \
+ ->var)
#define IS_ETH_FP(fp) (fp->index < \
BNX2X_NUM_ETH_QUEUES(fp->bp))
#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
#else
#define IS_FCOE_FP(fp) false
#define IS_FCOE_IDX(idx) false
@@ -616,6 +614,22 @@ struct bnx2x_fastpath {
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+/* number of NEXT_PAGE descriptors that may be required during placement */
+#define NEXT_CNT_PER_TX_PKT(bds) \
+ (((bds) + MAX_TX_DESC_CNT - 1) / \
+ MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
+/* max BDs per tx packet w/o next_pages:
+ * START_BD - describes the packet
+ * START_BD(split) - includes unpaged data segment for GSO
+ * PARSING_BD - for TSO and CSUM data
+ * Frag BDs - describe pages for frags
+ */
+#define BDS_PER_TX_PKT 3
+#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
+/* max BDs per tx packet including next pages */
+#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
+
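MAX_DESC_PER_TX_PKT replaces the old MAX_SKB_FRAGS + 4 heuristic at the queue-stop checks with an exact worst case. The arithmetic, under the common assumption of 4 KB pages:

/* Worked example, assuming 4 KB pages (so MAX_SKB_FRAGS = 17,
 * MAX_TX_DESC_CNT = 255, NEXT_PAGE_TX_DESC_CNT = 1):
 *
 *	MAX_BDS_PER_TX_PKT      = 17 + 3 = 20
 *	NEXT_CNT_PER_TX_PKT(20) = ((20 + 254) / 255) * 1 = 1
 *	MAX_DESC_PER_TX_PKT     = 20 + 1 = 21
 *
 * so the watermarks in bnx2x_tx_int()/bnx2x_start_xmit() below reserve
 * 21 descriptors per packet instead of guessing MAX_SKB_FRAGS + 4.
 */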
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -805,8 +819,11 @@ struct bnx2x_common {
#define CHIP_NUM_57810_MF 0x16ae
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840 0x168d
-#define CHIP_NUM_57840_MF 0x16ab
+#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
@@ -818,8 +835,12 @@ struct bnx2x_common {
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
-#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
-#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_57840(bp) \
+ ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
+#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
@@ -978,8 +999,8 @@ union cdu_context {
};
/* CDU host DB constants */
-#define CDU_ILT_PAGE_SZ_HW 3
-#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
@@ -1182,11 +1203,31 @@ struct bnx2x_prev_path_list {
struct list_head list;
};
+struct bnx2x_sp_objs {
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
*/
struct bnx2x_fastpath *fp;
+ struct bnx2x_sp_objs *sp_objs;
+ struct bnx2x_fp_stats *fp_stats;
+ struct bnx2x_fp_txdata *bnx2x_txq;
+ int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1301,7 +1342,9 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1377,6 +1420,7 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ int num_napi_queues;
int disable_tpa;
u32 rx_mode;
@@ -1389,6 +1433,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1420,7 +1465,11 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- struct hw_context context;
+ /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
@@ -1433,13 +1482,14 @@ struct bnx2x {
/*
* Maximum CID count that might be required by the bnx2x:
- * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
-#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
- NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
@@ -1598,6 +1648,8 @@ struct bnx2x {
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
+ NON_ETH_CONTEXT_USE)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1656,6 +1708,9 @@ struct bnx2x_func_init_params {
continue; \
else
+#define for_each_napi_rx_queue(bp, var) \
+ for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
+
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1709,15 +1764,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
/**
- * Deletes all MACs configured for the specific MAC object.
- *
- * @param bp Function driver instance
- * @param mac_obj MAC object to cleanup
- *
- * @return zero if all MACs were cleaned
- */
-
-/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
* @bp: driver handle
@@ -1817,6 +1863,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_NORMAL 0
#define LOAD_OPEN 1
#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
@@ -1899,13 +1946,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED 0xf0000
#define PCICFG_LINK_SPEED_SHIFT 16
-
-#define BNX2X_NUM_TESTS 7
+#define BNX2X_NUM_TESTS_SF 7
+#define BNX2X_NUM_TESTS_MF 3
+#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+ BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_EXT_LOOPBACK 2
#define BNX2X_PHY_LOOPBACK_FAILED 1
#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_EXT_LOOPBACK_FAILED 3
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea9704..e879e19eb0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
* Makes sure the contents of the bp->fp[to].napi is kept
* intact. This is done by first copying the napi struct from
* the target to the source, and then mem copying the entire
- * source onto the target
+ * source onto the target. Update txdata pointers and related
+ * content.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+ struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+ struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+ struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+ int old_max_eth_txqs, new_max_eth_txqs;
+ int old_txdata_index = 0, new_txdata_index = 0;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+
+ /* move sp_objs contents as well, as their indices match fp ones */
+ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+ /* move fp_stats contents as well, as their indices match fp ones */
+ memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+ /* Update txdata pointers in fp and move txdata content accordingly:
+ * Each fp consumes 'max_cos' txdata structures, so the index should be
+ * decremented by max_cos x delta.
+ */
+
+ old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+ new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+ (bp)->max_cos;
+ if (from == FCOE_IDX(bp)) {
+ old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ }
+
+ memcpy(&bp->bnx2x_txq[new_txdata_index],
+ &bp->bnx2x_txq[old_txdata_index],
+ sizeof(struct bnx2x_fp_txdata));
+ to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
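The txdata relocation in bnx2x_move_fp() is easiest to follow with numbers. Assume 6 ETH queues, max_cos = 3, and an FCoE fastpath moving from index 6 to index 4 after a two-queue shortfall:

/* old_max_eth_txqs = 6 * 3 = 18
 * new_max_eth_txqs = (6 - 6 + 4) * 3 = 12
 *
 * old_txdata_index = 18 + FCOE_TXQ_IDX_OFFSET = 18
 * new_txdata_index = 12 + FCOE_TXQ_IDX_OFFSET = 12
 *
 * so the FCoE txdata contents are copied from slot 18 into slot 12 and
 * to_fp->txdata_ptr[0] is re-aimed at the new slot.
 */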
@@ -190,7 +221,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
- const struct eth_fast_path_rx_cqe *cqe)
+ const struct eth_fast_path_rx_cqe *cqe,
+ bool *l4_rxhash)
{
/* Set Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+ enum eth_rss_hash_type htype;
+
+ htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+ *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE);
return le32_to_cpu(cqe->rss_hash_result);
+ }
+ *l4_rxhash = false;
return 0;
}
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
}
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
skb->rxhash = tpa_info->rxhash;
+ skb->l4_rxhash = tpa_info->l4_rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate or map a new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,8 +657,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
return 0;
}
-static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
- struct bnx2x_fastpath *fp)
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp,
+ struct bnx2x_eth_q_stats *qstats)
{
/* Do nothing if no IP/L4 csum validation was done */
@@ -632,7 +674,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
if (cqe->fast_path_cqe.type_error_flags &
(ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
- fp->eth_q_stats.hw_csum_err++;
+ qstats->hw_csum_err++;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -679,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
+ bool l4_rxhash;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -776,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -789,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
goto reuse_rx;
}
memcpy(skb->data, data + pad, len);
@@ -803,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
skb = build_skb(data, 0);
if (unlikely(!skb)) {
kfree(data);
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->
+ rx_skb_alloc_failed++;
goto next_rx;
}
skb_reserve(skb, pad);
} else {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
@@ -821,13 +865,14 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a non-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
+ skb->l4_rxhash = l4_rxhash;
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM)
- bnx2x_csum_validate(skb, cqe, fp);
-
+ bnx2x_csum_validate(skb, cqe, fp,
+ bnx2x_fp_qstats(bp, fp));
skb_record_rx_queue(skb, fp->rx_queue);
@@ -888,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1205,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
unsigned pkts_compl = 0, bytes_compl = 0;
u16 sw_prod = txdata->tx_pkt_prod;
@@ -1217,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
sw_cons++;
}
netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev, txdata->txq_index));
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
}
}
}
@@ -1325,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
free_irq(bp->dev->irq, bp->dev);
}
-int __devinit bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
{
int msix_vec = 0, i, rc, req_cnt;
@@ -1579,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
+
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
@@ -1607,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
- tx = MAX_TXQS_PER_COS * bp->max_cos;
- rx = BNX2X_NUM_ETH_QUEUES(bp);
+ tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+ rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
/* account for fcoe queue */
#ifdef BCM_CNIC
@@ -1666,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
int i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
/* Prepare the initial contents of the indirection table if RSS is
* enabled
*/
- for (i = 0; i < sizeof(ind_table); i++)
- ind_table[i] =
+ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+ bp->rss_conf_obj.ind_table[i] =
bp->fp->cl_id +
ethtool_rxfh_indir_default(i, num_eth_queues);
@@ -1685,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
* For 57712 and newer on the other hand it's a per-function
* configuration.
*/
- return bnx2x_config_rss_eth(bp, ind_table,
- bp->port.pmf || !CHIP_IS_E1x(bp));
+ return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash)
+ bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
int i;
@@ -1713,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+ memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
if (config_hash) {
/* RSS keys */
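The new udp_rss_v4/udp_rss_v6 bits add run-time control over 4-tuple hashing for UDP flows (TCP already hashes on ports). Assuming they are wired to the standard ethtool rx-flow-hash interface, as the matching ethtool changes in this series suggest, user space would toggle them roughly like this:

/* Enable 4-tuple (src/dst IP + src/dst port) hashing for UDP over IPv4:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * and fall back to 2-tuple (IP pair only) hashing:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sd
 */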
@@ -1754,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1765,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Clean ETH primary MAC */
__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
- rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0)
BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1851,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
+ int cos;
struct napi_struct orig_napi = fp->napi;
+ struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init)
+ if (bp->stats_init) {
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
- else {
+ } else {
/* Keep Queue statistics */
struct bnx2x_eth_q_stats *tmp_eth_q_stats;
struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1863,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
GFP_KERNEL);
if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
sizeof(struct bnx2x_eth_q_stats));
tmp_eth_q_stats_old =
kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
GFP_KERNEL);
if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
if (tmp_eth_q_stats) {
- memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
+ memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
kfree(tmp_eth_q_stats);
}
if (tmp_eth_q_stats_old) {
- memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
kfree(tmp_eth_q_stats_old);
}
@@ -1891,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
-
+ fp->tpa_info = orig_tpa_info;
fp->bp = bp;
fp->index = index;
if (IS_ETH_FP(fp))
@@ -1900,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Special queues support only one CoS */
fp->max_cos = 1;
+ /* Init txdata pointers */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+#endif
+ if (IS_ETH_FP(fp))
+ for_each_cos_in_tx_queue(fp, cos)
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+ BNX2X_NUM_ETH_QUEUES(bp) + index];
+
/*
* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
@@ -1949,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa.
+ * Also set fp->disable_tpa and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
+ memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
+ sizeof(struct bnx2x_fp_txdata));
/* Set the receive queues buffer size */
@@ -2176,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
bp->state = BNX2X_STATE_DIAG;
break;
@@ -2195,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
@@ -2215,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
- bnx2x_dcbx_init(bp);
+ /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+ bnx2x_dcbx_init(bp, false);
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
@@ -2298,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Stop Tx */
bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2456,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
#endif
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
- bnx2x_tx_int(bp, &fp->txdata[cos]);
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
if (bnx2x_has_rx_work(fp)) {
@@ -2834,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_fastpath *fp;
struct netdev_queue *txq;
struct bnx2x_fp_txdata *txdata;
struct sw_tx_bd *tx_buf;
@@ -2844,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
- int nbd, txq_index, fp_index, txdata_index;
+ int nbd, txq_index;
dma_addr_t mapping;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int i;
@@ -2863,39 +2936,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
- /* decode the fastpath index and the cos index from the txq */
- fp_index = TXQ_TO_FP(txq_index);
- txdata_index = TXQ_TO_COS(txq_index);
-
-#ifdef BCM_CNIC
- /*
- * Override the above for the FCoE queue:
- * - FCoE fp entry is right after the ETH entries.
- * - FCoE L2 queue uses bp->txdata[0] only.
- */
- if (unlikely(!NO_FCOE(bp) && (txq_index ==
- bnx2x_fcoe_tx(bp, txq_index)))) {
- fp_index = FCOE_IDX;
- txdata_index = 0;
- }
-#endif
+ txdata = &bp->bnx2x_txq[txq_index];
/* enable this debug print to view the transmission queue being used
DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
- /* locate the fastpath and the txdata */
- fp = &bp->fp[fp_index];
- txdata = &fp->txdata[txdata_index];
-
/* enable this debug print to view the transmission details
DP(NETIF_MSG_TX_QUEUED,
"transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
- (skb_shinfo(skb)->nr_frags + 3))) {
- fp->eth_q_stats.driver_xoff++;
+ skb_shinfo(skb)->nr_frags +
+ BDS_PER_TX_PKT +
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
@@ -3169,7 +3225,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3177,8 +3233,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
* fp->bd_tx_cons */
smp_mb();
- fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
netif_tx_wake_queue(txq);
}
txdata->tx_pkt++;
@@ -3243,7 +3299,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* configure traffic class to transmission queue mapping */
for (cos = 0; cos < bp->max_cos; cos++) {
count = BNX2X_NUM_ETH_QUEUES(bp);
- offset = cos * MAX_TXQS_PER_COS;
+ offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
netdev_set_tc_queue(dev, cos, count, offset);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping tc %d to offset %d count %d\n",
@@ -3342,7 +3398,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
if (!skip_tx_queue(bp, fp_index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
@@ -3414,7 +3470,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
return i - failure_cnt;
}
@@ -3499,7 +3555,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!skip_tx_queue(bp, index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFUP,
"allocating tx memory of fp %d cos %d\n",
@@ -3582,7 +3638,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
* NO_FCOE_FLAG
*/
@@ -3607,7 +3663,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
*/
	/* move FCoE fp even if NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
#endif
bp->num_queues -= delta;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3619,7 +3675,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
+ kfree(bp->fp->tpa_info);
kfree(bp->fp);
+ kfree(bp->sp_objs);
+ kfree(bp->fp_stats);
+ kfree(bp->bnx2x_txq);
kfree(bp->msix_table);
kfree(bp->ilt);
}
@@ -3630,6 +3690,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
+ int fp_array_size;
+ int i;
/*
* The biggest MSI-X table we might need is as a maximum number of fast
@@ -3638,12 +3700,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
- sizeof(*fp), GFP_KERNEL);
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+	BNX2X_DEV_INFO("fp_array_size %d\n", fp_array_size);
+
+ fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
+ for (i = 0; i < fp_array_size; i++) {
+ fp[i].tpa_info =
+ kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+ sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+ if (!(fp[i].tpa_info))
+ goto alloc_err;
+ }
+
bp->fp = fp;
+ /* allocate sp objs */
+ bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ GFP_KERNEL);
+ if (!bp->sp_objs)
+ goto alloc_err;
+
+ /* allocate fp_stats */
+ bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ GFP_KERNEL);
+ if (!bp->fp_stats)
+ goto alloc_err;
+
+ /* Allocate memory for the transmission queues array */
+ bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
+#ifdef BCM_CNIC
+ bp->bnx2x_txq_size++;
+#endif
+ bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
+ sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ if (!bp->bnx2x_txq)
+ goto alloc_err;
+
/* msix table */
tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
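
The allocation sequence above can share a single error label because
bnx2x_free_mem_bp() frees every array unconditionally and kfree(NULL) is a
no-op, so whatever prefix of the allocations succeeded is unwound safely. A
self-contained model of the pattern (struct and field names are stand-ins,
not the driver's):

	#include <stdlib.h>
	#include <string.h>

	struct ctx {
		int *fp, *sp_objs, *fp_stats, *txq;
	};

	static int ctx_alloc(struct ctx *c, size_t n)
	{
		memset(c, 0, sizeof(*c));	/* so the unwind may free all */
		c->fp = calloc(n, sizeof(*c->fp));
		if (!c->fp)
			goto alloc_err;
		c->sp_objs = calloc(n, sizeof(*c->sp_objs));
		if (!c->sp_objs)
			goto alloc_err;
		c->fp_stats = calloc(n, sizeof(*c->fp_stats));
		if (!c->fp_stats)
			goto alloc_err;
		c->txq = calloc(n, sizeof(*c->txq));
		if (!c->txq)
			goto alloc_err;
		return 0;

	alloc_err:
		/* free(NULL) is a no-op, like kfree(NULL) */
		free(c->fp);
		free(c->sp_objs);
		free(c->fp_stats);
		free(c->txq);
		return -1;
	}
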
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b75347..dfa757e7429 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
+extern int int_mode;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
*
* @bp: driver handle
- * @rss_obj RSS object to use
+ * @rss_obj: RSS object to use
- * @ind_table:	indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
*/
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash);
+ bool config_hash);
/**
* bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
* @bp: driver handle
*/
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
#endif
/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
*
* @bp: driver handle
*/
-void bnx2x_dcbx_init(struct bnx2x *bp);
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
/**
* bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
* fills msix_table, requests vectors, updates num_queues
* according to number of available vectors.
*/
-int __devinit bnx2x_enable_msix(struct bnx2x *bp);
+int bnx2x_enable_msix(struct bnx2x *bp);
/**
* bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
u8 cos;
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
return true;
return false;
}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
int i;
+ bp->num_napi_queues = bp->num_queues;
+
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+void bnx2x_set_int_mode(struct bnx2x *bp);
+
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
return num_queues ?
min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
}
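
Using netif_get_num_default_rss_queues() instead of num_online_cpus() means
the default queue count no longer grows with every CPU on large machines;
the helper caps the default at a small fixed number (8 when it was
introduced). A compact model of the selection, with the inputs passed in
explicitly for illustration:

	/* module parameter wins if set, otherwise the kernel default,
	 * both clamped to what the hardware supports */
	static int calc_num_queues(int param_queues, int default_rss, int hw_max)
	{
		int want = param_queues ? param_queues : default_rss;

		return want < hw_max ? want : hw_max;
	}
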
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
return 2 * vn + BP_PORT(bp);
}
-static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
- bool config_hash)
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
- config_hash);
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
struct bnx2x *bp = fp->bp;
/* Configure classification DBs */
- bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
- BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+ fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
bnx2x_sp_mapping(bp, mac_rdata),
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
- __le16 *tx_cons_sb)
+ struct bnx2x_fp_txdata *txdata, u32 cid,
+ int txq_index, __le16 *tx_cons_sb,
+ struct bnx2x_fastpath *fp)
{
txdata->cid = cid;
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
+ txdata->parent_fp = fp;
DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
- /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
- * 16 ETH clients per function when CNIC is enabled!
- *
- * Fix it ASAP!!!
- */
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-
- bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
/* No multi-CoS for FCoE L2 client */
BUG_ON(fp->max_cos != 1);
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244bd753..8a73374e52a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_default_priority = 0;
}
-void bnx2x_dcbx_init(struct bnx2x *bp)
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
{
u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+ /* only PMF can send ADMIN msg to MFW in old MFW versions */
+ if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+ return;
+
if (bp->dcbx_enabled <= 0)
return;
/* validate:
 * chip is good for dcbx version,
* dcb is wanted
- * the function is pmf
* shmem2 contains DCBX support fields
*/
DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
- if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
- bnx2x_dcbx_admin_mib_updated_params(bp,
- dcbx_lldp_params_offset);
+ /* need HW lock to avoid scenario of two drivers
+ * writing in parallel to shmem
+ */
+ bnx2x_acquire_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ if (update_shmem)
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
/* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ /* release HW lock only after MFW acks that it finished
+ * reading values from shmem
+ */
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
}
}
}
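
Because a non-PMF function may now push the admin MIB, the shmem write and
the DCBX "go" command to the MFW have to sit in one critical section, and
the lock is dropped only once the MFW has finished reading shmem. A
user-space model of the same discipline, with a pthread mutex standing in
for the hardware lock (names hypothetical):

	#include <pthread.h>

	static pthread_mutex_t dcbx_admin_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int shmem_admin_mib;

	static void dcbx_push_admin_cfg(unsigned int cfg, int update_shmem)
	{
		pthread_mutex_lock(&dcbx_admin_lock);	/* HW lock in the driver */
		if (update_shmem)
			shmem_admin_mib = cfg;	/* two PFs must not race here */
		/* the DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG command goes here;
		 * only its ack guarantees the MFW is done reading shmem */
		pthread_mutex_unlock(&dcbx_admin_lock);
	}
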
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
"Handling parity error recovery. Try again later\n");
return 1;
}
- if (netif_running(bp->dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
+ if (netif_running(bp->dev))
+ bnx2x_dcbx_init(bp, true);
DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee5c5a..fc4e0e3885b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
4, STATS_FLAGS_FUNC, "recoverable_errors" },
{ STATS_OFFSET32(unrecoverable_error),
4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
int port_type;
u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
case ETH_PHY_XFP_FIBER:
case ETH_PHY_KR:
case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
+ if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
+ ETH_PHY_SFP_1G_FIBER) {
+ cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+ cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
- u32 speed;
+ u32 speed, phy_idx;
if (IS_MF_SD(bp))
return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
"10G half not supported\n");
return -EINVAL;
}
-
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
if (!(bp->port.supported[cfg_idx]
- & SUPPORTED_10000baseT_Full)) {
+ & SUPPORTED_10000baseT_Full) ||
+ (bp->link_params.phy[phy_idx].media_type ==
+ ETH_PHY_SFP_1G_FIBER)) {
DP(BNX2X_MSG_ETHTOOL,
"10G full not supported\n");
return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS;
+ info->testinfo_len = BNX2X_NUM_TESTS(bp);
info->eedump_len = bp->common.flash_size;
info->regdump_len = bnx2x_get_regs_len(dev);
}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
return rc;
}
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0, phy_idx;
+ u8 *user_data = data;
+ int remaining_len = ee->len, xfer_size;
+ unsigned int page_off = ee->offset;
+
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ bnx2x_acquire_phy_lock(bp);
+ while (!rc && remaining_len > 0) {
+ xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : remaining_len;
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ page_off,
+ xfer_size,
+ user_data);
+ remaining_len -= xfer_size;
+ user_data += xfer_size;
+ page_off += xfer_size;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
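
The loop walks the requested window in SFP_EEPROM_PAGE_SIZE pieces because
each bnx2x_read_sfp_module_eeprom() call is one bounded transaction on the
module's two-wire bus. The same chunking, modelled against an in-memory
EEPROM image (PAGE and the helper are illustrative, not driver code):

	#include <string.h>

	#define PAGE 16		/* hypothetical per-transaction limit */

	static int eeprom_read(const unsigned char *eeprom, unsigned int off,
			       unsigned int len, unsigned char *out)
	{
		while (len) {
			unsigned int chunk = len > PAGE ? PAGE : len;

			memcpy(out, eeprom + off, chunk); /* one transaction */
			off += chunk;
			out += chunk;
			len -= chunk;
		}
		return 0;
	}
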
+
+static int bnx2x_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int phy_idx;
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
+ case ETH_PHY_DA_TWINAX:
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
u32 cmd_flags)
{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static const struct {
- char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
- { "register_test (offline)" },
- { "memory_test (offline)" },
- { "loopback_test (offline)" },
- { "nvram_test (online)" },
- { "interrupt_test (online)" },
- { "link_test (online)" },
- { "idle check (online)" }
+static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+ "register_test (offline) ",
+ "memory_test (offline) ",
+ "int_loopback_test (offline)",
+ "ext_loopback_test (offline)",
+ "nvram_test (online) ",
+ "interrupt_test (online) ",
+ "link_test (online) "
};
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+ u32 modes = 0;
+
+ if (eee_adv & SHMEM_EEE_100M_ADV)
+ modes |= ADVERTISED_100baseT_Full;
+ if (eee_adv & SHMEM_EEE_1G_ADV)
+ modes |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & SHMEM_EEE_10G_ADV)
+ modes |= ADVERTISED_10000baseT_Full;
+
+ return modes;
+}
+
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+ u32 eee_adv = 0;
+ if (modes & ADVERTISED_100baseT_Full)
+ eee_adv |= SHMEM_EEE_100M_ADV;
+ if (modes & ADVERTISED_1000baseT_Full)
+ eee_adv |= SHMEM_EEE_1G_ADV;
+ if (modes & ADVERTISED_10000baseT_Full)
+ eee_adv |= SHMEM_EEE_10G_ADV;
+
+ return eee_adv << shift;
+}
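
bnx2x_eee_to_adv() and bnx2x_adv_to_eee() are inverses over the three speed
bits, which is what lets bnx2x_set_eee() compare the user's advertisement
word directly against the shmem nibble. A self-checking sketch of that
round trip (the ADVERTISED_* constants are abridged to the three values
used here):

	#include <assert.h>

	enum {	/* ethtool ADVERTISED_* values, abridged */
		ADV_100 = 0x008, ADV_1000 = 0x020, ADV_10000 = 0x1000 };
	enum {	/* SHMEM_EEE_*_ADV bits */
		EEE_100M = 1, EEE_1G = 2, EEE_10G = 4 };

	static unsigned int eee_to_adv(unsigned int e)
	{
		return (e & EEE_100M ? ADV_100 : 0) |
		       (e & EEE_1G ? ADV_1000 : 0) |
		       (e & EEE_10G ? ADV_10000 : 0);
	}

	static unsigned int adv_to_eee(unsigned int a)
	{
		return (a & ADV_100 ? EEE_100M : 0) |
		       (a & ADV_1000 ? EEE_1G : 0) |
		       (a & ADV_10000 ? EEE_10G : 0);
	}

	int main(void)
	{
		unsigned int e;

		for (e = 0; e < 8; e++)		/* lossless round trip */
			assert(adv_to_eee(eee_to_adv(e)) == e);
		return 0;
	}
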
+
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ edata->supported =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ edata->advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ edata->lp_advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ /* SHMEM value is in 16u units --> Convert to 1u units. */
+ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+ edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
+ edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
+ edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+ return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+ u32 advertised;
+
+ if (IS_MF(bp))
+ return 0;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+ DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+ return -EOPNOTSUPP;
+ }
+
+ advertised = bnx2x_adv_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+ DP(BNX2X_MSG_ETHTOOL,
+		   "Direct manipulation of EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Maximal Tx Lpi timer supported is %x(u)\n",
+ EEE_MODE_TIMER_MASK);
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled &&
+ (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Minimal Tx Lpi timer supported is %d(u)\n",
+ EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+ return -EINVAL;
+ }
+
+	/* All is well; apply changes */
+ if (edata->eee_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+ if (edata->tx_lpi_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+ bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+ bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+ EEE_MODE_TIMER_MASK) |
+ EEE_MODE_OVERRIDE_NVRAM |
+ EEE_MODE_OUTPUT_TIME;
+
+	/* Restart link to propagate changes */
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+ cnt = 1400;
+ while (!bp->link_vars.link_up && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && !bp->link_vars.link_up)
+ DP(BNX2X_MSG_ETHTOOL,
+ "Timeout waiting for link init\n");
}
}
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
unsigned char *packet;
struct bnx2x_fastpath *fp_rx = &bp->fp[0];
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
- struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 len;
int rc = -ENODEV;
u8 *data;
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+ txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+ DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
return -EINVAL;
+ }
break;
case BNX2X_MAC_LOOPBACK:
if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
+ case BNX2X_EXT_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't configure external loopback\n");
+ return -EINVAL;
+ }
+ break;
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
return rc;
}
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+ int rc;
+ u8 is_serdes =
+ (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (BP_NOMCP(bp))
+ return -ENODEV;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_EXT_LOOPBACK_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for external lb) failed\n");
+ return -ENODEV;
+ }
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ bnx2x_netif_stop(bp, 1);
+
+ rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
#define CRC32_RESIDUAL 0xdebb20e3
static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
return -ENODEV;
}
- params.q_obj = &bp->fp->q_obj;
+ params.q_obj = &bp->sp_objs->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
+ int rc;
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ DP(BNX2X_MSG_ETHTOOL,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+	   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
- memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test when interface is down\n");
return;
+ }
- /* offline tests are not supported in MF mode */
- if (IS_MF(bp))
- etest->flags &= ~ETH_TEST_FL_OFFLINE;
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
- if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ /* offline tests are not supported in MF mode */
+ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_DIAG);
+ rc = bnx2x_nic_load(bp, LOAD_DIAG);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for offline) failed\n");
+ return;
+ }
+
/* wait until link state is restored */
bnx2x_wait_for_link(bp, 1, is_serdes);
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- buf[2] = bnx2x_test_loopback(bp);
+ buf[2] = bnx2x_test_loopback(bp); /* internal LB */
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
+ if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+ if (buf[3] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for online) failed\n");
+ return;
+ }
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
- buf[3] = 1;
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_test_intr(bp) != 0) {
- buf[4] = 1;
+ if (!IS_MF(bp))
+ buf[5] = 1;
+ else
+ buf[1] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
+ if (!IS_MF(bp))
+ buf[6] = 1;
+ else
+ buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
return num_stats;
case ETH_SS_TEST:
- return BNX2X_NUM_TESTS;
+ return BNX2X_NUM_TESTS(bp);
default:
return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k;
+ int i, j, k, offset, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
break;
case ETH_SS_TEST:
- memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ /* First 4 tests cannot be done in MF mode */
+ if (!IS_MF(bp))
+ start = 0;
+ else
+ start = 4;
+ for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
+ i++, j++) {
+ offset = sprintf(buf+32*i, "%s",
+ bnx2x_tests_str_arr[j]);
+			*(buf + 32 * i + offset) = '\0';
+ }
break;
}
}
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
/* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v4)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v6)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
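
In the ethtool RXNFC ABI, info->data is a bitmask of the header fields fed
into the RSS hash: IP_SRC|IP_DST is the 2-tuple configuration, and adding
both L4 port halves gives the 4-tuple one reported above for TCP and,
optionally, UDP. The bit values, as defined in <linux/ethtool.h>:

	#include <stdio.h>

	#define RXH_IP_SRC	(1 << 4)
	#define RXH_IP_DST	(1 << 5)
	#define RXH_L4_B_0_1	(1 << 6)	/* src port */
	#define RXH_L4_B_2_3	(1 << 7)	/* dst port */

	int main(void)
	{
		unsigned int two_tuple = RXH_IP_SRC | RXH_IP_DST;
		unsigned int four_tuple = two_tuple | RXH_L4_B_0_1 | RXH_L4_B_2_3;

		printf("2-tuple 0x%x, 4-tuple 0x%x\n", two_tuple, four_tuple);
		return 0;
	}
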
+
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules __always_unused)
{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
+ case ETHTOOL_GRXFH:
+ return bnx2x_get_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ int udp_rss_requested;
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+		/* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ udp_rss_requested = 1;
+ else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+ udp_rss_requested = 0;
+ else
+ return -EINVAL;
+ if ((info->flow_type == UDP_V4_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else if ((info->flow_type == UDP_V6_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else {
+ return 0;
+ }
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+		/* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return bnx2x_set_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, ind_table, false);
+ return bnx2x_config_rss_eth(bp, false);
+}
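
The table cached in rss_conf_obj stores absolute client IDs rather than
queue indices, hence the bias by the leading queue's cl_id on every entry
before the configuration is pushed to the device. The adjustment in
isolation (a hypothetical helper, not driver code):

	static void align_ind_table(unsigned char *dst, const unsigned int *user,
				    unsigned int size, unsigned char leading_cl_id)
	{
		unsigned int i;

		for (i = 0; i < size; i++)
			dst[i] = (unsigned char)(user[i] + leading_cl_id);
	}
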
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+ channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp: bnx2x private structure
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+ bnx2x_del_all_napi(bp);
+ bnx2x_disable_msi(bp);
+ BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bnx2x_set_int_mode(bp);
+ bnx2x_add_all_napi(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count, channels->other_count,
+ channels->combined_count);
+
+ /* We don't support separate rx / tx channels.
+ * We don't allow setting 'other' channels.
+ */
+	if (channels->rx_count || channels->tx_count || channels->other_count ||
+	    (channels->combined_count == 0) ||
+	    (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+ DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+ DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+ return 0;
+ }
+
+ /* Set the requested number of queues in bp context.
+ * Note that the actual number of queues created during load may be
+ * less than requested if memory is low.
+ */
+ if (unlikely(!netif_running(dev))) {
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return 0;
+ }
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
}
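
From userspace this surfaces through the standard ethtool channel commands,
for example:

	ethtool -l eth0			# query max / current combined channels
	ethtool -L eth0 combined 4	# request 4 RSS queues

The unload/reload round trip on a running interface is needed because queue
memory, MSI-X vectors and NAPI contexts are all sized at load time; on a
downed interface only the stored count changes, taking effect at the next
open.
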
static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,17 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_module_info = bnx2x_get_module_info,
+ .get_module_eeprom = bnx2x_get_module_eeprom,
+ .get_eee = bnx2x_get_eee,
+ .set_eee = bnx2x_set_eee,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77aa721..bbc66ced9c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
-/**
- * This file defines HSI constants common to all microcode flows
- */
+/* This file defines HSI constants common to all microcode flows */
#define PROTOCOL_STATE_BIT_OFFSET 6
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8ba85f..76b6e65790f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
#define BNX2X_HSI_H
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
@@ -33,12 +34,6 @@ struct license_key {
u32 reserved_b[4];
};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
-#define NVM_PATH_MAX 2
-
/****************************************************************************
* Shared HW configuration *
****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
uses the same defines as link_config */
u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 Reserved2[17]; /* 0x484 */
+ /* EEE power saving mode */
+ u32 eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ u32 Reserved2[16]; /* 0x488 */
};
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
u32 link_status;
/* Driver should update this field on any link change event */
+ #define LINK_STATUS_NONE (0<<0)
#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
#define LINK_STATUS_LINK_UP 0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
#define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
u32 port_stx;
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
#define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
#define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
#define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
#define DRV_STATUS_DRV_INFO_REQ 0x04000000
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
#define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+struct eee_remote_vals {
+ u32 tx_tw;
+ u32 rx_tw;
+};
+
/**** SUPPORT FOR SHMEM ARRRAYS ***
* The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
* define arrays with storage types smaller then unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
u32 ibft_host_addr; /* initialized by option ROM */
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ u32 reserved[E2_FUNC_MAX];
+
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+ * value. When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ u32 eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 sizeof_port_stats;
};
@@ -2599,6 +2654,9 @@ struct host_port_stats {
u32 pfc_frames_tx_lo;
u32 pfc_frames_rx_hi;
u32 pfc_frames_rx_lo;
+
+ u32 eee_lpi_count_hi;
+ u32 eee_lpi_count_lo;
};
@@ -2638,118 +2696,6 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
-/* current drv_info version */
-#define DRV_INFO_CUR_VER 1
-
-/* drv_info op codes supported */
-enum drv_info_opcode {
- ETH_STATS_OPCODE,
- FCOE_STATS_OPCODE,
- ISCSI_STATS_OPCODE
-};
-
-#define ETH_STAT_INFO_VERSION_LEN 12
-/* Per PCI Function Ethernet Statistics required from the driver */
-struct eth_stats_info {
- /* Function's Driver Version. padded to 12 */
- u8 version[ETH_STAT_INFO_VERSION_LEN];
- /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
- u8 mac_local[8];
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
- u32 feature_flags; /* Feature_Flags. */
-#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
-#define FEATURE_ETH_LSO_MASK 0x02
-#define FEATURE_ETH_BOOTMODE_MASK 0x1C
-#define FEATURE_ETH_BOOTMODE_SHIFT 2
-#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
-#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
-#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
-#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
-#define FEATURE_ETH_TOE_MASK 0x20
- u32 lso_max_size; /* LSO MaxOffloadSize. */
- u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
- /* Num Offloaded Connections TCP_IPv4. */
- u32 ipv4_ofld_cnt;
- /* Num Offloaded Connections TCP_IPv6. */
- u32 ipv6_ofld_cnt;
- u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
- u32 txq_size; /* TX Descriptors Queue Size */
- u32 rxq_size; /* RX Descriptors Queue Size */
- /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 txq_avg_depth;
- /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 rxq_avg_depth;
- /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
- u32 iov_offload;
- /* Number of NetQueue/VMQ Config'd. */
- u32 netq_cnt;
- u32 vf_cnt; /* Num VF assigned to this PF. */
-};
-
-/* Per PCI Function FCOE Statistics required from the driver */
-struct fcoe_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u32 txq_size; /* FCoE TX Descriptors Queue Size. */
- u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
- /* FCoE TX Descriptor Queue Avg Depth. */
- u32 txq_avg_depth;
- /* FCoE RX Descriptors Queue Avg Depth. */
- u32 rxq_avg_depth;
- u32 rx_frames_lo; /* FCoE RX Frames received. */
- u32 rx_frames_hi; /* FCoE RX Frames received. */
- u32 rx_bytes_lo; /* FCoE RX Bytes received. */
- u32 rx_bytes_hi; /* FCoE RX Bytes received. */
- u32 tx_frames_lo; /* FCoE TX Frames sent. */
- u32 tx_frames_hi; /* FCoE TX Frames sent. */
- u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
- u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
-};
-
-/* Per PCI Function iSCSI Statistics required from the driver*/
-struct iscsi_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
- u8 ww_port_name[64]; /* iSCSI World wide port name */
- u8 boot_target_name[64];/* iSCSI Boot Target Name. */
- u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
- u32 boot_target_portal; /* iSCSI Boot Target Portal. */
- u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
- u32 max_frame_size; /* Max Frame Size. bytes */
- u32 txq_size; /* PDU TX Descriptors Queue Size. */
- u32 rxq_size; /* PDU RX Descriptors Queue Size. */
- u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
- u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
- u32 rx_pdus_lo; /* iSCSI PDUs received. */
- u32 rx_pdus_hi; /* iSCSI PDUs received. */
- u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
- u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
- u32 tx_pdus_lo; /* iSCSI PDUs sent. */
- u32 tx_pdus_hi; /* iSCSI PDUs sent. */
- u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
- u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
- u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
- * 9 nibbles, the position of each nibble
- * represents the C-PCP value, the value
- * of the nibble = S-PCP value.
- */
-};
-
-union drv_info_to_mcp {
- struct eth_stats_info ether_stat;
- struct fcoe_stats_info fcoe_stat;
- struct iscsi_stats_info iscsi_stat;
-};
/* stats collected for afex.
* NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 6e7d5c0843b..f4beb46c470 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -285,7 +285,6 @@
#define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700)
-#define WC_UC_TIMEOUT 100
#define MAX_KR_LINK_RETRY 4
/**********************************************************/
@@ -1306,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
return 0;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
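
The probe works because the first dword of shmem2 records the region size
the firmware wrote, so a field exists exactly when that size extends past
the field's offset; older firmware simply reports a smaller region. A
compact model of the check (struct layout abridged and hypothetical):

	#include <stddef.h>

	struct shmem2_model {
		unsigned int size;		/* first dword: region size */
		unsigned int other[100];	/* ... everything before ... */
		unsigned int eee_status[2];	/* one word per port */
	};

	static int has_eee_status(const struct shmem2_model *s, int port)
	{
		size_t off = offsetof(struct shmem2_model, eee_status) +
			     port * sizeof(s->eee_status[0]);

		return s->size > off;
	}
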
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+
/******************************************************************/
/* PFC section */
/******************************************************************/
@@ -1540,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
/* Reset UMAC */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1631,7 +1718,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
* ports of the path
*/
- if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
+ if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
DP(NETIF_MSG_LINK,
@@ -1642,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Hard reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1672,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Soft reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1730,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* update PFC */
bnx2x_update_pfc_xmac(params, vars, 0);
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
/* Enable TX and RX */
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
@@ -1785,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- if (CHIP_REV_IS_SLOW(bp)) {
- /* config GMII mode */
- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
- } else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
@@ -1812,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
- }
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1851,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
val &= ~0x810;
EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
- /* enable emac */
+ /* Enable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
- /* enable emac for jumbo packets */
+ /* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
- /* strip CRC */
+ /* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
- /* disable the NIG in/out to the bmac */
+ /* Disable the NIG in/out to the bmac */
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
- /* enable the NIG in/out to the emac */
+ /* Enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if ((params->feature_config_flags &
@@ -1902,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
- /* tx control */
+ /* TX control */
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1962,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
- /* disable PFC RX & TX & STATS and set 8 COS */
+ /* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
}
@@ -2056,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2084,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2114,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2132,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2189,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
if (pfc_params->cos0_pauseable !=
pfc_params->cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
+ /* Nonpauseable= Lossy + pauseable = Lossless*/
e3b0_val->lb_guarantied =
PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
@@ -2388,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
 * not contiguous and ARB_CREDIT_WEIGHT_0 + offset is suitable.
******************************************************************************/
-int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
- u8 cos_entry,
- u32 priority_mask, u8 port)
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
@@ -2440,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2507,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
- /* output enable for RX_XCM # IF */
+ /* Output enable for RX_XCM # IF */
REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
NIG_REG_XCM0_OUT_EN, xcm_out_en);
@@ -2556,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_mng(params, vars->link_status);
- /* update NIG params */
+ /* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* update BRB params */
+ /* Update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
if (bnx2x_status)
return bnx2x_status;
@@ -2614,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2623,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
- /* mac control */
+ /* MAC control */
val = 0x3;
if (is_lb) {
val |= 0x4;
@@ -2633,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
- /* set rx mtu */
+ /* Set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
- /* set tx mtu */
+ /* Set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
- /* configure safc */
+ /* Configure SAFC */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2684,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2703,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
wb_data, 2);
udelay(30);
- /* set rx mtu */
+ /* Set RX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set tx mtu */
+ /* Set TX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2732,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
- /* reset and unreset the BigMac */
+ /* Reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- msleep(1);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- /* enable access for bmac registers */
+ /* Enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type */
@@ -2798,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
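Note: the msleep(1) conversions in these hunks follow the kernel's timers-howto guidance. msleep() is jiffy-based and rounds up, so msleep(1) can sleep a full jiffy (10 ms at HZ=100) or more, while usleep_range() is hrtimer-based and gives the scheduler an explicit coalescing window. The convention, as a sketch:

#include <linux/delay.h>

/* Delay selection used throughout this patch:
 *   < ~10 us       udelay()        - busy-wait
 *   10 us - 20 ms  usleep_range()  - hrtimer, coalescing window
 *   > 20 ms        msleep()        - jiffy-based, may oversleep
 */
static void bmac_reset_settle(void)
{
	usleep_range(1000, 2000);	/* was msleep(1): 1-2 ms is plenty */
}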
@@ -2810,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u32 init_crd, crd;
u32 count = 1000;
- /* disable port */
+ /* Disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
- /* wait for init credit */
+ /* Wait for init credit */
init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) {
- msleep(5);
-
+ usleep_range(5000, 10000);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
count--;
}
@@ -2837,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
line_speed == SPEED_1000 ||
line_speed == SPEED_2500) {
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
- /* update init credit */
+ /* Update init credit */
init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
- /* update init credit */
+ /* Update init credit */
switch (line_speed) {
case SPEED_10000:
init_crd = thresh + 553 - 22;
@@ -2863,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
line_speed, init_crd);
- /* probe the credit changes */
+ /* Probe the credit changes */
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
- msleep(5);
+ usleep_range(5000, 10000);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
- /* enable port */
+ /* Enable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
return 0;
}
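Note: bnx2x_pbf_update() disables the port, polls (with a bounded retry count, so a wedged block cannot hang the caller) until the PBF credit register drains back to its init value, reprograms the threshold and init credit for the new speed, then pulses INIT_P0 to latch them. The bounded-poll idiom, sketched generically:

/* Sketch: poll a register until it equals 'want' or the retry budget
 * runs out; mirrors the init_crd/crd loop above. -ETIMEDOUT on failure.
 */
static int poll_reg_eq(struct bnx2x *bp, u32 reg, u32 want, u32 tries)
{
	while (tries--) {
		if (REG_RD(bp, reg) == want)
			return 0;
		usleep_range(5000, 10000);	/* same 5 ms step as above */
	}
	return -ETIMEDOUT;
}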
@@ -2935,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (reg << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -2971,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (reg << 16) |
EMAC_MDIO_COMM_COMMAND_READ_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3009,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3030,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
*ret_val = 0;
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3078,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3098,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
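Note: these four hunks walk the two MDIO flavors the EMAC block speaks. Clause 22 carries the register number in the frame itself, so one COMM transaction suffices; Clause 45 (used for 10G PHYs) first latches the 16-bit register address within a device-address (devad) space, then runs a separate read or write cycle, each phase started via START_BUSY and polled until the controller clears it. A condensed sketch of the write path; wait_not_busy() is illustrative, standing in for the driver's udelay(10) polling loops:

/* Sketch: Clause-45 write = address cycle + data cycle. */
static int cl45_write_sketch(struct bnx2x *bp, struct bnx2x_phy *phy,
			     u8 devad, u16 reg, u16 val)
{
	u32 cmd;

	/* Phase 1: latch 'reg' inside 'devad' */
	cmd = (phy->addr << 21) | (devad << 16) | reg |
	      EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, cmd);
	if (wait_not_busy(bp, phy))		/* poll START_BUSY clear */
		return -EFAULT;

	/* Phase 2: write 'val' against the latched address */
	cmd = (phy->addr << 21) | (devad << 16) | val |
	      EMAC_MDIO_COMM_COMMAND_WRITE_45 | EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, cmd);
	return wait_not_busy(bp, phy) ? -EFAULT : 0;
}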
@@ -3188,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
xfer_cnt = 16 - lc_addr;
- /* enable the engine */
+ /* Enable the engine */
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
val |= MCPR_IMC_COMMAND_ENABLE;
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* program slave device ID */
+ /* Program slave device ID */
val = (sl_devid << 16) | sl_addr;
REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* start xfer with 0 byte to update the address pointer ???*/
+ /* Start xfer with 0 byte to update the address pointer ??? */
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_WRITE_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3220,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
if (rc == -EFAULT)
return rc;
- /* start xfer with read op */
+ /* Start xfer with read op */
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_READ_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3228,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
(xfer_cnt);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3331,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
port = port ^ 1;
lane = (port<<1) + path;
- } else { /* two port mode - no port swap */
+ } else { /* Two port mode - no port swap */
/* Figure out path swap value */
path_swap_ovr =
@@ -3409,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
val = SERDES_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3430,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
val = XGXS_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3522,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
{
u16 val;
struct bnx2x *bp = params->bp;
- /* read modify write pause advertizing */
+ /* Read-modify-write the pause advertisement */
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3657,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 val16 = 0, lane, bam37 = 0;
- struct bnx2x *bp = params->bp;
+ u16 val16 = 0, lane, i;
+ struct bnx2x *bp = params->bp;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+ };
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overridden by 10G force */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
- MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, 0x7415);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
- /* Disable Autoneg: re-enable it after adv is done. */
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u16 sd_digital;
+ u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
val16 |= (1<<5);
/* Enable CL37 1G Parallel Detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (sd_digital | 0x1));
-
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
DP(NETIF_MSG_LINK, "Advertize 1G\n");
}
if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3704,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
val16 |= (1<<7);
/* Enable 10G Parallel Detect */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
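Note: bnx2x_cl45_read_or_write() is the second helper this patch leans on; every open-coded read/OR/write triple becomes one call. Judging from its call sites (OR-in 0x1, 0x100, 0x3800, ...), it reads the register, ORs in the supplied bits and writes the result back, roughly:

/* Sketch: set bits in a CL45 register without disturbing the rest.
 * Matches how bnx2x_cl45_read_or_write() is used throughout the patch.
 */
static void cl45_set_bits(struct bnx2x *bp, struct bnx2x_phy *phy,
			  u8 devad, u16 reg, u16 or_val)
{
	u16 val;

	bnx2x_cl45_read(bp, phy, devad, reg, &val);
	bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
}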
@@ -3738,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
}
@@ -3755,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
}
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, &val16);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
/* Over 1G - AN local device user page 1 */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3776,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val;
-
- /* Disable Autoneg */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL3_UP1, 0x1);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
-
- /* Disable CL36 PCS Tx */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
-
- /* Double Wide Single Data Rate @ pll rate */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
-
- /* Leave cl72 training enable, needed for KR */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Disable CL36 PCS Tx */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
+ /* Double Wide Single Data Rate @ pll rate */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
- 0x2);
+ 0x2}
+ };
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Leave CL72 enabled */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- val | 0x3800);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3800);
/* Set speed via PMA/PMD register */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
@@ -3840,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, 0xF9);
- /* set and clear loopback to cause a reset to 64/66 decoder */
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3855,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
/* Hold rxSeqStart */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
/* Hold tx_fifo_reset */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
/* Disable CL73 AN */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3876,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
/* Disable 100FX Idle detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3940,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
tx_driver_val);
/* Enable fiber mode, enable and invert sig_det */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ /* Enable LPI pass through */
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+ MDIO_WC_REG_EEE_COMBO_CONTROL0,
+ 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4139,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
u16 lane)
{
struct bnx2x *bp = params->bp;
- u16 val16;
-
+ u16 i;
+ static struct bnx2x_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
/* Set XFI clock comp as default. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+ for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
- bnx2x_warpcore_reset_lane(bp, phy, 1);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, 0x014a);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, 0x0800);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX_FIR_TAP, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
- bnx2x_warpcore_reset_lane(bp, phy, 0);
+
}
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4260,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
if (!vars->turn_to_run_wc_rt)
return;
- /* return if there is no link partner */
+ /* Return if there is no link partner */
if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
return;
@@ -4294,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_reset_lane(bp, phy, 0);
- /* restart Autoneg */
+ /* Restart Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
@@ -4311,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
} /*params->rx_tx_asic_rst*/
}
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+ SPEED_10000) &&
+ (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
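Note: factoring the SFI setup out into bnx2x_warpcore_config_sfi() is what lets the module-detect interrupt handler (further down in this patch) rerun the speed selection after a hot-swap: a 1G-only SFP forces the SGMII path even if 10G was requested. A usage sketch matching that handler, with the lane held in reset across the reconfiguration:

/* Sketch: re-apply SFI config after a module swap, as the E3
 * module-detect path added later in this patch does.
 */
static void resfi_after_module_swap(struct bnx2x_phy *phy,
				    struct link_params *params)
{
	struct bnx2x *bp = params->bp;

	bnx2x_warpcore_reset_lane(bp, phy, 1);	/* hold lane in reset */
	bnx2x_warpcore_config_sfi(phy, params);	/* 10G XFI or 1G fiber */
	bnx2x_warpcore_reset_lane(bp, phy, 0);	/* release the lane */
}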
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4371,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
-
- bnx2x_warpcore_clear_regs(phy, params, lane);
- if (vars->line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
- bnx2x_warpcore_set_10G_XFI(phy, params, 0);
- } else if (vars->line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(
- phy, params, 1, 0);
- }
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
+
+ bnx2x_warpcore_config_sfi(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4500,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- val16 | 0x10);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
/* Set 1G loopback based on lane (1-copy) */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4518,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
} else {
/* 10G & 20G */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
}
-void bnx2x_sync_link(struct link_params *params,
- struct link_vars *vars)
+
+static void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
@@ -4606,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
USES_WARPCORE(bp) &&
(vars->line_speed == SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) {
@@ -4620,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
else
vars->mac_type = MAC_TYPE_EMAC;
}
- } else { /* link down */
+ } else { /* Link down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
@@ -4629,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
}
}
@@ -4698,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- /* set the master_ln for AN */
+ /* Set the master_ln for AN */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4721,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- /* reset the unicore */
+ /* Reset the unicore */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4730,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
- /* wait for the reset to self clear */
+ /* Wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
- /* the reset erased the previous bank value */
+ /* The reset erased the previous bank value */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4952,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
-/* program SerDes, forced speed */
+/* Program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4960,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 reg_val;
- /* program duplex, disable autoneg and sgmii*/
+ /* Program duplex, disable autoneg and sgmii*/
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4979,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
+ /* Clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5008,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
- /* set extended capabilities */
+ /* Set extended capabilities */
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5028,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 val;
- /* for AN, we are always publishing full duplex */
+ /* For AN, we are always publishing full duplex */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
@@ -5090,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 control1;
- /* in SGMII mode, the unicore is always slave */
+ /* In SGMII mode, the unicore is always slave */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
- /* set sgmii mode (and not fiber) */
+ /* Set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5106,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
- /* if forced speed */
+ /* If forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
- /* set speed, disable autoneg */
+ /* Set speed, disable autoneg */
u16 mii_control;
CL22_RD_OVER_CL45(bp, phy,
@@ -5129,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case SPEED_10:
- /* there is nothing to set for 10M */
+ /* There is nothing to set for 10M */
break;
default:
- /* invalid speed for SGMII */
+ /* Invalid speed for SGMII */
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
break;
}
- /* setting the full duplex */
+ /* Setting the full duplex */
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5148,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
mii_control);
} else { /* AN mode */
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -5244,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* resolve from gp_status in case of AN complete and not sgmii */
+ /* Resolve from gp_status in case of AN complete and not sgmii */
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
/* Update the advertised flow-control of LD/LP in AN */
if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5468,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
bnx2x_xgxs_an_resolve(phy, params, vars,
gp_status);
}
- } else { /* link_down */
+ } else { /* Link down */
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -5617,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 tx_driver;
u16 bank;
- /* read precomp */
+ /* Read precomp */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
- /* bits [10:7] at lp_up2, positioned at [15:12] */
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5636,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
- /* replace tx_driver bits [15:12] */
+ /* Replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5732,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
bnx2x_set_preemphasis(phy, params);
- /* forced speed requested? */
+ /* Forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- /* disable autoneg */
+ /* Disable autoneg */
bnx2x_set_autoneg(phy, params, vars, 0);
- /* program speed and duplex */
+ /* Program speed and duplex */
bnx2x_program_serdes(phy, params, vars);
} else { /* AN_mode */
@@ -5750,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
/* AN enabled */
bnx2x_set_brcm_cl37_advertisement(phy, params);
- /* program duplex & pause advertisement (for aneg) */
+ /* Program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisement(phy, params,
vars->ieee_fc);
- /* enable autoneg */
+ /* Enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, enable_cl73);
}
@@ -5793,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
+ /* Reset the SerDes and wait for reset bit return low */
+ if (rc)
return rc;
bnx2x_set_aer_mmd(params, phy);
- /* setting the masterLn_def again after the reset */
+ /* Setting the masterLn_def again after the reset */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
bnx2x_set_master_ln(params, phy);
bnx2x_set_swap_lanes(params, phy);
@@ -5823,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt == 1000)
@@ -6054,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) {
- /* change the uni_phy_addr in the nig */
+ /* Change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
port*0x18));
@@ -6074,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
0x6041);
msleep(200);
- /* set aer mmd back */
+ /* Set aer mmd back */
bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) {
- /* and md_devad */
+ /* And md_devad */
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
md_devad);
}
@@ -6275,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* link is up only if both local phy and external phy are up */
+ /* Link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
}
@@ -6296,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
serdes_phy_type = ((params->phy[phy_index].media_type ==
- ETH_PHY_SFP_FIBER) ||
+ ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_1G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_XFP_FIBER) ||
(params->phy[phy_index].media_type ==
@@ -6397,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
- /* reset the SerDes/XGXS */
+ /* Reset the SerDes/XGXS */
REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
(0x1ff << (params->port*16)));
}
@@ -6430,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
- /* update shared memory */
+ /* Update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6446,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
- /* reset BigMac/Xmac */
+ usleep_range(10000, 20000);
+ /* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)) {
bnx2x_bmac_rx_disable(bp, params->port);
@@ -6463,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
}
if (CHIP_IS_E3(bp)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
@@ -6502,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
}
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
@@ -6534,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
- /* disable drain */
+ /* Disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
- /* update shared memory */
+ /* Update shared memory */
bnx2x_update_mng(params, vars->link_status);
-
+ bnx2x_update_mng_eee(params, vars->eee_status);
/* Check remote fault */
for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6583,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
phy_vars[phy_index].phy_link_up = 0;
phy_vars[phy_index].link_up = 0;
phy_vars[phy_index].fault_detected = 0;
+ /* Different consideration, since vars holds the inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
}
if (USES_WARPCORE(bp))
@@ -6603,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -6712,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars->link_status |= LINK_STATUS_SERDES_LINK;
else
vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
active_external_phy);
}
@@ -6745,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
} else if (prev_line_speed != vars->line_speed) {
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
0);
- msleep(1);
+ usleep_range(1000, 2000);
}
}
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6815,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
@@ -6912,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
- /* ucode reboot and rst */
+ /* Ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
@@ -6956,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- msleep(1);
+ usleep_range(1000, 2000);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
((fw_msgout & 0xff) != 0x03 && (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7050,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
"XAUI workaround has completed\n");
return 0;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
break;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
return -EINVAL;
@@ -7128,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
bnx2x_cl45_write(bp, phy,
@@ -7276,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
- /* clear the interrupt LASI status register */
+ /* Clear the interrupt LASI status register */
bnx2x_cl45_read(bp, phy,
MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
bnx2x_cl45_read(bp, phy,
@@ -7601,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7655,7 +7750,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
}
@@ -7692,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u32 data_array[4];
u16 addr32;
struct bnx2x *bp = params->bp;
- if (byte_cnt > 16) {
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 16 bytes\n");
return -EINVAL;
@@ -7728,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val, i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7765,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
- msleep(1);
+ usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
@@ -7801,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
@@ -7811,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- int rc = -EINVAL;
+ int rc = -EOPNOTSUPP;
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7836,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 val, check_limiting_mode = 0;
+ u8 val[2], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7844,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ 2,
+ (u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
- switch (val) {
+ switch (val[0]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -7888,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
- phy->media_type = ETH_PHY_SFP_FIBER;
- DP(NETIF_MSG_LINK, "Optic module detected\n");
check_limiting_mode = 1;
+ if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
+ SFP_EEPROM_COMP_CODE_LR_MASK |
+ SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ phy->media_type = ETH_PHY_SFP_1G_FIBER;
+ phy->req_line_speed = SPEED_1000;
+ } else {
+ int idx, cfg_idx = 0;
+ DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+ for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val);
+ val[0]);
return -EINVAL;
}
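Note: the connector byte alone no longer decides the media type. For LC connectors the EEPROM read is widened to two bytes so the 10G Ethernet compliance codes (SFF-8472 byte 3, where bits 4-6 flag 10GBASE-SR/LR/LRM) can be inspected, and a module with none of those bits set is treated as 1G-only with req_line_speed pinned to 1G. The classification, sketched:

/* Sketch: classify an LC-connector SFP from EEPROM bytes 2-3.
 * val[0] = connector type, val[1] = 10G compliance codes; the masks
 * are assumed to match the SFP_EEPROM_COMP_CODE_*_MASK definitions.
 */
static bool sfp_module_is_10g(const u8 val[2])
{
	return (val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
			  SFP_EEPROM_COMP_CODE_LR_MASK |
			  SFP_EEPROM_COMP_CODE_LRM_MASK)) != 0;
}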
sync_offset = params->shmem_base +
@@ -7980,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
return 0;
}
- /* format the warning message */
+ /* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -8026,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
timeout * 5);
return 0;
}
- msleep(5);
+ usleep_range(5000, 10000);
}
return -EINVAL;
}
@@ -8338,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
- /* check SFP+ module compatibility */
+ /* Check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
@@ -8401,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_power_sfp_module(params, phy, 1);
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
gpio_port);
- if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
bnx2x_sfp_module_detection(phy, params);
- else
+ if (CHIP_IS_E3(bp)) {
+ u16 rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if (!rx_tx_in_reset) {
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_config_sfi(phy, params);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ }
+ }
+ } else {
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
} else {
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -8469,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- /* clear LASI indication*/
+ /* Clear LASI indication*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
bnx2x_cl45_read(bp, phy,
@@ -8537,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
if (val)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
if ((params->feature_config_flags &
@@ -8666,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 150ms for microcode load */
+ /* Wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8860,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == SPEED_1000) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only single reset pin, need to set the 10G
+ * registers although it is default
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -8877,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
rx_alarm_ctrl_val);
@@ -8929,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- /* Set option 1G speed */
- if (phy->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /* Power down the XAUI until link is up in case of dual-media
- * and 1G
- */
- if (DUAL_MEDIA(params)) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, &val);
- val |= (3<<10);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, val);
- }
- } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin, need to set the 10G
- * registers although it is default
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
- 0x0020);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
- 0x0008);
- }
-
+ bnx2x_8727_config_speed(phy, params);
/* Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
@@ -9105,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+ /* Reconfigure link speed based on module type limitations */
+ bnx2x_8727_config_speed(phy, params);
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9585,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
u16 fw_cmd,
- u16 cmd_args[])
+ u16 cmd_args[], int argc)
{
- u32 idx;
+ int idx;
u16 val;
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9599,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
MDIO_84833_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9607,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
}
/* Prepare argument(s) and issue command */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
@@ -9620,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9628,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
return -EINVAL;
}
/* Gather returning data */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
@@ -9662,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
data[1] = (u16)pair_swap;
status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data);
+ PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -9740,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
+static int bnx2x_8483x_eee_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle is in 1us units --> eee_status stores 16us units */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
+
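Note: the right-shift by 4 above is the unit conversion between the computed idle timer (microseconds) and the shmem field (16-microsecond granules): an idle time of 1024 us, for instance, is stored as 1024 >> 4 = 64. As a sketch:

/* Sketch of the conversion done in bnx2x_8483x_eee_timers():
 * 1 us units -> 16 us granules, clipped to the shmem timer field.
 * e.g. eee_us_to_granules(1024) == 64.
 */
static u32 eee_us_to_granules(u32 us)
{
	return (us >> 4) & SHMEM_EEE_TIMER_MASK;
}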
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 0;
+
+ DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+ /* Make certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 1;
+
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9752,7 +9984,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
- msleep(1);
+ usleep_range(1000, 2000);
if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
@@ -9839,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args);
- if (rc != 0)
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
if (initialize)
@@ -9864,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
+ phy->flags |= FLAGS_EEE_10GBT;
+ vars->eee_status |= SHMEM_EEE_10G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ rc = bnx2x_8483x_eee_timers(params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_8483x_disable_eee(phy, params, vars);
+ return rc;
+ }
+
+ if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+ (params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+ rc = bnx2x_8483x_enable_eee(phy, params, vars);
+ else
+ rc = bnx2x_8483x_disable_eee(phy, params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+ return rc;
+ }
+ } else {
+ phy->flags &= ~FLAGS_EEE_10GBT;
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read(bp, phy,
@@ -9918,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
legacy_status);
link_up = ((legacy_status & (1<<11)) == (1<<11));
- if (link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed = SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed = SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+ if (link_up) {
if (legacy_status & (1<<8))
vars->duplex = DUPLEX_FULL;
else
@@ -9956,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
}
}
if (link_up) {
- DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
@@ -9995,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ u32 eee_shmem = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_EEE_ADV, &val2);
+ if ((val1 & val2) & 0x8) {
+ DP(NETIF_MSG_LINK, "EEE negotiated\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+ if (val2 & 0x12)
+ eee_shmem |= SHMEM_EEE_100M_ADV;
+ if (val2 & 0x4)
+ eee_shmem |= SHMEM_EEE_1G_ADV;
+ if (val2 & 0x68)
+ eee_shmem |= SHMEM_EEE_10G_ADV;
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (eee_shmem <<
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ }
}
return link_up;
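The masks in the EEE block above select bits of the clause-45 EEE advertisement registers (MDIO_AN_REG_EEE_ADV and its link-partner counterpart) and fold them into the shmem speed buckets; the exact bit-to-speed grouping follows the driver's own masks, not a spec quote. A minimal sketch of that resolution, with a hypothetical helper name:

	/* Sketch only: mirrors the decode in bnx2x_848xx_read_status(). */
	static u32 eee_lp_adv_to_shmem(u16 lp_adv)
	{
		u32 shmem = 0;

		if (lp_adv & 0x12)	/* bits grouped as 100M-capable */
			shmem |= SHMEM_EEE_100M_ADV;
		if (lp_adv & 0x4)	/* 1000BASE-T EEE */
			shmem |= SHMEM_EEE_1G_ADV;
		if (lp_adv & 0x68)	/* 10G (T/KX4/KR) EEE */
			shmem |= SHMEM_EEE_10G_ADV;
		return shmem;
	}

EEE is reported active only when bit 3 (10GBase-T) is set in both the local and the link-partner advertisement, i.e. ((val1 & val2) & 0x8).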
@@ -10273,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
u32 cfg_pin;
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* This works with E3 only, no need to check the chip
* before determining the port.
@@ -10342,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- /* read all advertisement */
+ /* Read all advertisement */
bnx2x_cl22_read(bp, phy,
0x09,
&an_1000_val);
@@ -10379,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* set 100 speed advertisement */
+ /* Set 100 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10393,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 100M\n");
}
- /* set 10 speed advertisement */
+ /* Set 10 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10532,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
/* Get speed operation status */
bnx2x_cl22_read(bp, phy,
- 0x19,
+ MDIO_REG_GPHY_AUX_STATUS,
&legacy_status);
DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
@@ -10759,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up print the AN outcome of the SFX7101 PHY */
+ /* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10771,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
- /* read LP advertised speeds */
+ /* Read LP advertised speeds */
if (val2 & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11090,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_SFPP_10G_FIBER,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -11249,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_TX_ERROR_CHECK |
+ FLAGS_EEE_10GBT),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11428,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
- phy->media_type = ETH_PHY_SFP_FIBER;
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ETH_PHY_KR;
@@ -11968,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
@@ -12017,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
break;
}
bnx2x_update_mng(params, vars->link_status);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
return 0;
}
@@ -12026,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
- /* disable attentions */
+ /* Disable attentions */
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable nig egress interface */
+ /* Disable nig egress interface */
if (!CHIP_IS_E3(bp)) {
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12051,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* The PHY reset is controlled by GPIO 1
* Hold it as vars low
*/
- /* clear link led */
+ /* Clear link led */
bnx2x_set_mdio_clk(bp, params->chip_id, port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
@@ -12089,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
- /* disable nig ingress interface */
+ /* Disable nig ingress interface */
if (!CHIP_IS_E3(bp)) {
- /* reset BigMac */
+ /* Reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12148,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
}
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -12222,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
- msleep(15);
+ usleep_range(15000, 30000);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
@@ -12254,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
bnx2x_ext_phy_hw_reset(bp, 0);
- msleep(5);
+ usleep_range(5000, 10000);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
@@ -12361,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Initiate PHY reset*/
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(5);
+ usleep_range(5000, 10000);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12459,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &val);
if (!(val & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt >= 1500) {
DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12549,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
}
- if (rc != 0)
+ if (rc)
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
0);
@@ -12630,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-static void bnx2x_analyze_link_error(struct link_params *params,
- struct link_vars *vars, u32 lss_status,
- u8 notify)
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 status,
+ u32 phy_flag, u32 link_flag, u8 notify)
{
struct bnx2x *bp = params->bp;
/* Compare new value with previous value */
u8 led_mode;
- u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+ u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
- if ((lss_status ^ half_open_conn) == 0)
- return;
+ if ((status ^ old_status) == 0)
+ return 0;
/* If values differ */
- DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
- half_open_conn, lss_status);
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ }
+ DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+ old_status, status);
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
- if (lss_status) {
- DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ if (status) {
vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
vars->link_up = 0;
- vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags |= phy_flag;
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12662,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
- DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
vars->link_up = 1;
- vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags &= ~phy_flag;
led_mode = LED_MODE_OPER;
/* Clear nig drain */
@@ -12682,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
if (notify)
bnx2x_notify_link_changed(bp);
+
+ return 1;
}
/******************************************************************************
@@ -12723,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
lss_status = 1;
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
@@ -12740,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
lss_status = (wb_data[0] > 0);
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
}
return 0;
}
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin, value = 0;
+ u8 led_change, port = params->port;
+ /* Get the SFP+ TX_Fault controlling pin (epio/gpio) */
+ cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+ DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+ return;
+ }
+
+ led_change = bnx2x_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault LED, set link status for further syncs */
+ u8 led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, the LED should stay on regardless */
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+ led_mode);
+ bnx2x_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
u16 phy_idx;
@@ -12763,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
bnx2x_check_over_curr(params, vars);
- bnx2x_warpcore_config_runtime(phy, params, vars);
+ if (vars->rx_tx_asic_rst)
+ bnx2x_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (bnx2x_is_sfp_module_plugged(phy, params)) {
+ bnx2x_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
+ /* Clean up stale fault state; the interrupt corrects the LEDs */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ }
+ }
+
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f4335..51cac813005 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+ u8 devad;
+ u16 reg;
+ u16 val;
+};
struct bnx2x_phy {
u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
+#define FLAGS_EEE_10GBT (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -162,14 +169,15 @@ struct bnx2x_phy {
u32 supported;
u32 media_type;
-#define ETH_PHY_UNSPECIFIED 0x0
-#define ETH_PHY_SFP_FIBER 0x1
-#define ETH_PHY_XFP_FIBER 0x2
-#define ETH_PHY_DA_TWINAX 0x3
-#define ETH_PHY_BASE_T 0x4
-#define ETH_PHY_KR 0xf0
-#define ETH_PHY_CX4 0xf1
-#define ETH_PHY_NOT_PRESENT 0xff
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFPP_10G_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_SFP_1G_FIBER 0x5
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which version is located*/
u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
u8 num_phys;
u8 rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define EEE_MODE_NVRAM_MASK (0x3)
+#define EEE_MODE_TIMER_MASK (0xfffff)
+#define EEE_MODE_OUTPUT_TIME (1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define EEE_MODE_ENABLE_LPI (1<<30)
+#define EEE_MODE_ADV_LPI (1<<31)
+
u16 hw_led_mode; /* part of the hw_config read from the shmem */
u32 multi_phy_config;
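To make the mode encoding above concrete, here is a sketch of two eee_mode compositions (values illustrative only). The first matches what bnx2x_get_port_hwinfo() in this patch sets by default (bits 29:28 = 2'b01, timer taken from nvram, output in microseconds); the second fully overrides the nvram (bits 29:28 = 2'b11) with a hypothetical 50us idle timer in bits 19:0:

	/* Sketch only: default mode as set by the driver */
	u32 eee_default = EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI |
			  EEE_MODE_OUTPUT_TIME;

	/* Sketch only: full nvram override, hypothetical 50us idle timer */
	u32 eee_override = EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI |
			   EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME |
			   (50 & EEE_MODE_TIMER_MASK);

Bits 31:30 (EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI) are what gates EEE on, per the comment above.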
@@ -282,6 +314,7 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
/* The same definitions as the shmem parameter */
u32 link_status;
+ u32 eee_status;
u8 fault_detected;
u8 rsrv1;
u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
-/**
- * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a665dab..9aaf863b423 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
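With ETH_ALEN = 6 and sizeof(u32) = 4, ALIGN(6, 4) = 8, so MAC_LEADING_ZERO_CNT evaluates to 2: the copies into the 8-byte big-endian shmem fields below land right-aligned behind two zero bytes, as in this use from later in the patch:

	/* 8-byte field ends up as 00 00 m0 m1 m2 m3 m4 m5 */
	memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
	       bp->fip_mac, ETH_ALEN);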
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-static int int_mode;
+int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
@@ -135,7 +137,10 @@ enum bnx2x_board_type {
BCM57800_MF,
BCM57810,
BCM57810_MF,
- BCM57840,
+ BCM57840_O,
+ BCM57840_4_10,
+ BCM57840_2_20,
+ BCM57840_MFO,
BCM57840_MF,
BCM57811,
BCM57811_MF
@@ -155,6 +160,9 @@ static struct {
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
@@ -187,8 +195,17 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
#endif
-#ifndef PCI_DEVICE_ID_NX2_57840
-#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
@@ -209,7 +226,10 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
- { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
@@ -758,7 +778,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
- txdata = fp->txdata[cos];
+ txdata = *fp->txdata_ptr[cos];
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +896,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1603,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
- struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+ struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
DP(BNX2X_MSG_SP,
"fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1730,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
@@ -2124,6 +2144,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
}
}
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_EXT;
+ }
+
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2916,7 +2941,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
u8 cos)
{
- txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3055,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
memcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN - 1);
- bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
+ bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
ether_stat->mtu_size = bp->dev->mtu;
@@ -3055,7 +3080,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
- memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3089,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* insert FCoE stats from ramrod response */
if (!NO_FCOE(bp)) {
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
tstorm_queue_statistics;
struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
xstorm_queue_statistics;
struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3172,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
- memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3203,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
@@ -3742,6 +3775,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_AFEX_EVENT_MASK)
bnx2x_handle_afex_cmd(bp,
val & DRV_STATUS_AFEX_EVENT_MASK);
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(bp);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4615,11 +4650,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID)
+ if (cid == BNX2X_ISCSI_ETH_CID(bp))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
#endif
- vlan_mac_obj = &bp->fp[cid].mac_obj;
+ vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4752,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
for_each_eth_queue(bp, q) {
/* Set the appropriate Queue object */
fp = &bp->fp[q];
- queue_params.q_obj = &fp->q_obj;
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* send the ramrod */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4763,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
- fp = &bp->fp[FCOE_IDX];
- queue_params.q_obj = &fp->q_obj;
+ fp = &bp->fp[FCOE_IDX(bp)];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* clear pending completion bit */
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4796,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID)
- return &bnx2x_fcoe(bp, q_obj);
+ if (cid == BNX2X_FCOE_ETH_CID(bp))
+ return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
#endif
- return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+ return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5682,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init tx data */
for_each_cos_in_tx_queue(fp, cos) {
- bnx2x_init_txdata(bp, &fp->txdata[cos],
- CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
- FP_COS_TO_TXQ(fp, cos),
- BNX2X_TX_SB_INDEX_BASE + cos);
- cids[cos] = fp->txdata[cos].cid;
+ bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+ FP_COS_TO_TXQ(fp, cos, bp),
+ BNX2X_TX_SB_INDEX_BASE + cos, fp);
+ cids[cos] = fp->txdata_ptr[cos]->cid;
}
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+ fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
/**
@@ -5706,7 +5741,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
for_each_tx_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
- bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7090,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
- ilt->lines[cdu_ilt_start + i].page =
- bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
- /* cdu ilt pages are allocated manually so there's no need to
- set the size */
+ bp->context[i].cxt_mapping;
+ ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
bnx2x_ilt_init_op(bp, INITOP_SET);
@@ -7327,6 +7360,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
void bnx2x_free_mem(struct bnx2x *bp)
{
+ int i;
+
/* fastpath */
bnx2x_free_fp_mem(bp);
/* end of fastpath */
@@ -7340,9 +7375,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
- bp->context.size);
-
+ for (i = 0; i < L2_ILT_LINES(bp); i++)
+ BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+ bp->context[i].size);
bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7463,8 @@ alloc_mem_err:
int bnx2x_alloc_mem(struct bnx2x *bp)
{
+ int i, allocated, context_size;
+
#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
@@ -7457,11 +7494,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
- bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
- BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
- bp->context.size);
+ /* Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in few aspects:
+ * 1. There are multiple entities allocating memory for context -
+ * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
+ context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+ BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+ &bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ allocated += bp->context[i].size;
+ }
BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
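The loop above carves context_size into CDU_ILT_PAGE_SZ chunks, trimming the last one to the remainder; later code (bnx2x_pf_q_prep_init(), bnx2x_cnic_sp_post()) maps a cid back to its chunk with ILT_PAGE_CIDS. A sketch of that mapping, with a hypothetical helper name:

	/* Sketch only: cid -> CDU context in the new bp->context[] layout */
	static union cdu_context *cid_to_cdu_ctx(struct bnx2x *bp, u32 cid)
	{
		u32 page = cid / ILT_PAGE_CIDS;
		u32 off  = cid - page * ILT_PAGE_CIDS; /* cid % ILT_PAGE_CIDS */

		return &bp->context[page].vcxt[off];
	}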
@@ -7563,8 +7618,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
/* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
- BNX2X_ETH_MAC, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+ set, BNX2X_ETH_MAC, &ramrod_flags);
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7634,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
{
switch (int_mode) {
case INT_MODE_MSI:
@@ -7590,11 +7645,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* Set number of queues for MSI-X mode */
- bnx2x_set_num_queues(bp);
-
- BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
-
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
* and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7785,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
{
u8 cos;
+ int cxt_index, cxt_offset;
+
/* FCoE Queue uses Default SB, thus has no HC capabilities */
if (!IS_FCOE_FP(fp)) {
__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7823,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
fp->index, init_params->max_cos);
/* set the context pointers queue object */
- for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+ cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+ ILT_PAGE_CIDS);
init_params->cxts[cos] =
- &bp->context.vcxt[fp->txdata[cos].cid].eth;
+ &bp->context[cxt_index].vcxt[cxt_offset].eth;
+ }
}
int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7894,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7911,7 +7967,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7922,7 +7978,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
tx_index++){
/* ascertain this is a normal queue*/
- txdata = &fp->txdata[tx_index];
+ txdata = fp->txdata_ptr[tx_index];
DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
@@ -8289,7 +8345,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos)
- rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
if (rc)
return;
@@ -8300,12 +8356,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
usleep_range(1000, 1000);
/* Clean all ETH MACs */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+ false);
if (rc < 0)
BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
/* Clean up UC list */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9754,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
BC_SUPPORTS_PFC_STATS : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+ BC_SUPPORTS_FCOE_FEATURES : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+ BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10144,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 config;
- u32 ext_phy_type, ext_phy_config;
+ u32 ext_phy_type, ext_phy_config, eee_mode;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -10149,6 +10211,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
+
+ /* Configure link feature according to nvram value */
+ eee_mode = (((SHMEM_RD(bp, dev_info.
+ port_feature_config[port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+ EEE_MODE_ENABLE_LPI |
+ EEE_MODE_OUTPUT_TIME;
+ } else {
+ bp->link_params.eee_mode = 0;
+ }
}
void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11072,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
int rc;
struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
unsigned long ramrod_flags = 0;
/* First schedule a cleanup up of old configuration */
@@ -11503,8 +11578,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
}
}
-/**
- * IRO array is stored in the following format:
+/* IRO array is stored in the following format:
* {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
*/
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11746,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
- int cid_count = BNX2X_L2_CID_COUNT(bp);
+ int cid_count = BNX2X_L2_MAX_CID(bp);
#ifdef BCM_CNIC
cid_count += CNIC_CID_MAX;
@@ -11717,7 +11791,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
struct bnx2x *bp;
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count;
+ int rx_count, tx_count, rss_count, doorbell_size;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11745,7 +11819,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
case BCM57800_MF:
case BCM57810:
case BCM57810_MF:
- case BCM57840:
+ case BCM57840_O:
+ case BCM57840_4_10:
+ case BCM57840_2_20:
+ case BCM57840_MFO:
case BCM57840_MF:
case BCM57811:
case BCM57811_MF:
@@ -11760,13 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
- /* !!! FIXME !!!
- * Do not allow the maximum SB count to grow above 16
- * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
- * We will use the FP_SB_MAX_E1x macro for this matter.
- */
- max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
-
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11847,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/*
* Maximum number of netdev Tx queues:
- * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + FCOE_PRESENT;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11858,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp = netdev_priv(dev);
- BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
-
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -11803,6 +11870,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
@@ -11811,9 +11881,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
* Map doorbells here as we need the real value of bp->max_cos which
* is initialized in bnx2x_init_bp().
*/
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE(bp),
- pci_resource_len(pdev, 2)));
+ doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -11831,8 +11907,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
#endif
+
+ /* Set bp->num_queues for MSI-X mode*/
+ bnx2x_set_num_queues(bp);
+
/* Configure interrupt mode: try to enable MSI-X/MSI if
- * needed, set bp->num_queues appropriately.
+ * needed.
*/
bnx2x_set_int_mode(bp);
@@ -12176,6 +12256,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
struct eth_spe *spe;
+ int cxt_index, cxt_offset;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -12198,10 +12279,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
* ramrod
*/
if (type == ETH_CONNECTION_TYPE) {
- if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
- bnx2x_set_ctx_validation(bp, &bp->context.
- vcxt[BNX2X_ISCSI_ETH_CID].eth,
- BNX2X_ISCSI_ETH_CID);
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+ cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+ ILT_PAGE_CIDS;
+ cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+ (cxt_index * ILT_PAGE_CIDS);
+ bnx2x_set_ctx_validation(bp,
+ &bp->context[cxt_index].
+ vcxt[cxt_offset].eth,
+ BNX2X_ISCSI_ETH_CID(bp));
+ }
}
/*
@@ -12488,21 +12575,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
- int ulp_type = ctl->data.ulp_type;
+ int ulp_type = ctl->data.register_data.ulp_type;
if (CHIP_IS_E3(bp)) {
int idx = BP_FW_MB_IDX(bp);
- u32 cap;
+ u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ int path = BP_PATH(bp);
+ int port = BP_PORT(bp);
+ int i;
+ u32 scratch_offset;
+ u32 *host_addr;
- cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ /* first write capability to shmem2 */
if (ulp_type == CNIC_ULP_ISCSI)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
else if (ulp_type == CNIC_ULP_FCOE)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+ if ((ulp_type != CNIC_ULP_FCOE) ||
+ (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+ (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
+ break;
+
+ /* if reached here - should write fcoe capabilities */
+ scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+ if (!scratch_offset)
+ break;
+ scratch_offset += offsetof(struct glob_ncsi_oem_data,
+ fcoe_features[path][port]);
+ host_addr = (u32 *) &(ctl->data.register_data.
+ fcoe_features);
+ for (i = 0; i < sizeof(struct fcoe_capabilities);
+ i += 4)
+ REG_WR(bp, scratch_offset + i,
+ *(host_addr + i/4));
}
break;
}
+
case DRV_CTL_ULP_UNREGISTER_CMD: {
int ulp_type = ctl->data.ulp_type;
@@ -12554,6 +12665,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->num_irq = 2;
}
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
@@ -12632,10 +12758,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_client_id =
bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
- cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 00000000000..ddd5106ad2f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
+/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ u32 capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ u32 capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ u32 capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ u32 capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
+
+struct glob_ncsi_oem_data {
+ u32 driver_version;
+ u32 unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. Padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
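Given the nibble layout described for pcp_prior_map_tbl, looking up the S-PCP for a C-PCP is a shift and mask; a sketch with a hypothetical helper (note the comment counts nine nibbles while a u32 holds eight, so C-PCP values 0 through 7 are what actually fit):

	/* Sketch only: nibble at position c_pcp holds the mapped S-PCP */
	static u8 iscsi_spcp_for_cpcp(u32 pcp_prior_map_tbl, u8 c_pcp)
	{
		return (pcp_prior_map_tbl >> (c_pcp * 4)) & 0xf;
	}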
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd387492a8..ec62a5c8bd3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
* 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
#define MISC_REG_CHIP_TYPE 0xac60
#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit is applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
+ * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
+ * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit is applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the L1 Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE LPI REQ Mask. When
+ * =1 indicates that the P0 E0 EEE LPI REQ indication is disabled. When
+ * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
+ * E0 EEE LPI REQ Mask. When =1 indicates that the P1 E0 EEE LPI REQ
+ * indication is disabled. When =0 indicates that the P1 E0 EEE LPI REQ
+ * indication is enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 E1 EEE LPI REQ indication is disabled. When =0 indicates
+ * that the P0 E1 EEE LPI REQ indication is enabled. [14] - P1 E1 EEE LPI
+ * REQ Mask. When =1 indicates that the P1 E1 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P1 E1 EEE LPI REQ indication is
+ * enabled. [15] - L1 REQ Mask. When =1 indicates that the L1 REQ
+ * indication is disabled. When =0 indicates that the L1 REQ indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled
+ * (Rx EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
+/* [RW 16] EEE LPI Entry Events Counter. A statistics counter with the
+ * number of times the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
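For illustration only, a minimal sketch (assuming the bit layout documented
above; REG_RD()/REG_WR() are bnx2x's existing register accessors) of masking
the two falling-edge-detect exit events and folding the clear-on-read entry
counter into the driver statistics:

	/* Keep all exit events enabled except the Rx EEE LPI (bit 16) and
	 * L1 Status (bit 17) falling-edge detects.
	 */
	REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0, (1 << 17) | (1 << 16));

	/* The entry counter is clear-on-read, so accumulate it. */
	estats->eee_tx_lpi += REG_RD(bp, MISC_REG_CPMU_LP_SM_ENT_CNT_P0);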
/* [RW 32] The following driver registers (1...16) represent 16 drivers and
32 clients. Each client can be controlled by one driver only. One in each
bit represents that this driver controls the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
* packets transmitted by the MAC */
#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_EEE_CTRL 0xd8
+#define XMAC_REG_EEE_TIMERS_HI 0xe4
#define XMAC_REG_PAUSE_CTRL 0x68
#define XMAC_REG_PFC_CTRL 0x70
#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
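The new DCBX admin-MIB lock slots into the existing HW-lock scheme; a hedged
usage sketch (bnx2x_acquire_hw_lock()/bnx2x_release_hw_lock() are the
existing helpers in bnx2x_main.c; the MIB update itself is elided):

	/* Illustrative: serialize admin-MIB updates across PFs. */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
	/* ... read-modify-write the DCBX admin MIB in shmem ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);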
@@ -6813,6 +6931,8 @@ The other bits are reserved and should be zero*/
#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ The other bits are reserved and should be zero*/
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
#define MDIO_WC_REG_TX66_CONTROL 0x83b0
#define MDIO_WC_REG_RX66_CONTROL 0x83c0
#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ The other bits are reserved and should be zero*/
#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
#define CDU_REGION_NUMBER_UCM_AG 4
-/**
- * String-to-compress [31:8] = CID (all 24 bits)
+/* String-to-compress [31:8] = CID (all 24 bits)
* String-to-compress [7:4] = Region
* String-to-compress [3:0] = Type
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a4e82..734fd87cd99 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
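A usage sketch for the new UDP capabilities (the caller is hypothetical;
flag and field names are taken from bnx2x_sp.h below, and the rss_conf_obj
embedding in bp is assumed): enabling 4-tuple UDP hashing alongside TCP
before issuing the RSS configuration ramrod.

	struct bnx2x_config_rss_params params;
	int rc;

	memset(&params, 0, sizeof(params));
	params.rss_obj = &bp->rss_conf_obj;	/* assumed embedding in bp */
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
	rc = bnx2x_config_rss(bp, &params);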
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bdd0df..f83e033da6d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
-/**
- * @return positive is entry was optimized, 0 - if not, negative
- * in case of an error.
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
*/
typedef int (*exe_q_optimize)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
BNX2X_RSS_IPV4,
BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV4_UDP,
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
+ BNX2X_RSS_IPV6_UDP,
};
struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
/* Last configured indirection table */
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	/* flags for enabling 4-tuple hash on UDP */
+ u8 udp_rss_v4;
+ u8 udp_rss_v6;
+
int (*config_rss)(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
};
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
struct bnx2x_rx_mode_obj *o);
/**
- * Send and RX_MODE ramrod according to the provided parameters.
+ * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
*
- * @param bp
- * @param p Command parameters
+ * @p: Command parameters
*
- * @return 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if the operation was successful and there are no pending completions,
* positive number - if there are pending completions,
* negative - if there were errors
*/
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Configure multicast MACs list. May configure a new list
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
* provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
* (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
* configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * @param bp
- * @param p
- * @param command to execute: BNX2X_MCAST_CMD_X
- *
- * @return 0 is operation was sucessfull and there are no pending completions,
+ * Return: 0 if the operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
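A hedged sketch of the @cmd usage described above (the ramrod-params setup
is abbreviated and the mcast object embedding in bp is assumed):

	struct bnx2x_mcast_ramrod_params rparam;
	int rc;

	memset(&rparam, 0, sizeof(rparam));
	rparam.mcast_obj = &bp->mcast_obj;	/* assumed embedding in bp */

	/* Clean up the current multicast configuration. */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to clear mcast config: %d\n", rc);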
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Updates RSS configuration according to provided parameters.
- *
- * @param bp
- * @param p
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
*
- * @return 0 in case of success
+ * Return: 0 in case of success
*/
int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
/**
- * Return the current ind_table configuration.
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
*
- * @param bp
- * @param ind_table buffer to fill with the current indirection
+ * @ind_table: buffer to fill with the current indirection
* table content. Should be at least
* T_ETH_INDIRECTION_TABLE_SIZE bytes long.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785cd11d..667d89042d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_counter++;
+ if (CHIP_IS_E3(bp))
+ estats->eee_tx_lpi += REG_RD(bp,
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
+
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
struct tstorm_per_queue_stats *tclient =
&bp->fw_stats_data->queue_stats[i].
tstorm_queue_statistics;
- struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct tstorm_per_queue_stats *old_tclient =
+ &bnx2x_fp_stats(bp, fp)->old_tclient;
struct ustorm_per_queue_stats *uclient =
&bp->fw_stats_data->queue_stats[i].
ustorm_queue_statistics;
- struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct ustorm_per_queue_stats *old_uclient =
+ &bnx2x_fp_stats(bp, fp)->old_uclient;
struct xstorm_per_queue_stats *xclient =
&bp->fw_stats_data->queue_stats[i].
xstorm_queue_statistics;
- struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct xstorm_per_queue_stats *old_xclient =
+ &bnx2x_fp_stats(bp, fp)->old_xclient;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
u32 diff;
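For reference, the bnx2x_fp_stats() accessor used throughout this hunk is
assumed to map a fastpath to its slot in the new bp->fp_stats array,
roughly:

/* assumed definition (bnx2x.h): the fastpath index selects the stats slot */
#define bnx2x_fp_stats(bp, fp)	(&((bp)->fp_stats[(fp)->index]))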
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_rx_queue(bp, i)
- tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ for_each_rx_queue(bp, i) {
+ struct tstorm_per_queue_stats *old_tclient =
+ &bp->fp_stats[i].old_tclient;
+ tmp += le32_to_cpu(old_tclient->checksum_discard);
+ }
nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
int i;
for_each_queue(bp, i) {
- struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
struct bnx2x_eth_q_stats_old *qstats_old =
- &bp->fp[i].eth_q_stats_old;
+ &bp->fp_stats[i].eth_q_stats_old;
UPDATE_ESTAT_QSTAT(driver_xoff);
UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-/**
- * This function will prepare the statistics ramrod data the way
+/* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
* send the ramrod each time we have to.
- *
- * @param bp
*/
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
cur_query_entry->address.hi =
cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* function stats */
for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
- memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
- memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
if (bp->stats_init) {
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
- memset(&fp->eth_q_stats_old, 0,
- sizeof(fp->eth_q_stats_old));
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
}
}
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
/* save queue statistics */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct per_queue_stats *fcoe_q_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX];
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
&fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
memset(afex_stats, 0, sizeof(struct afex_stats));
for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
ADD_64(afex_stats->rx_unicast_bytes_hi,
qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fdfed..24b8e505b60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
/* Recovery */
u32 recoverable_error;
u32 unrecoverable_error;
+	/* src: Clear-on-Read register; will not survive PMF migration */
+ u32 eee_tx_lpi;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 2c89d17cbb2..3b4fc61f24c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ struct fcoe_capabilities *fcoe_cap =
+ &info.data.register_data.fcoe_features;
- if (reg)
+ if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
- else
+ if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
+ memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
+ } else {
info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+ }
info.data.ulp_type = ulp_type;
ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
+ if (!cp->ctx_tbl)
+ return -EINVAL;
+
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
@@ -612,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ else if (ulp_type == CNIC_ULP_FCOE)
+ dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -2589,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
return;
}
- cqes[0] = (struct kcqe *) &kcqe;
+ cqes[0] = &kcqe;
cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
@@ -3217,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ break;
+
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
@@ -3947,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
+ case L5CM_RAMROD_CMD_ID_CLOSE:
+ if (l4kcqe->status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
+ "status 0x%x\n", l4kcqe->status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ /* Fall through */
+ } else {
+ break;
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4250,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int i;
- cp->stop_cm(dev);
-
if (!cp->csk_tbl)
return 0;
@@ -4669,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
+ &sblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+ cp->kcq1.status_idx_ptr = &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4697,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = &msblk->status_idx;
+ cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4981,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+ u32 val;
+ pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
+ cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ func = CNIC_FUNC(cp);
+
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
@@ -5287,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
i++;
}
cnic_shutdown_rings(dev);
+ cp->stop_cm(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5516,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
rcu_read_unlock();
}
-/**
- * netdev event handler
- */
+/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e546b..5cb88881bba 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.10"
-#define CNIC_MODULE_RELDATE "March 21, 2012"
+#include "bnx2x/bnx2x_mfw_req.h"
+
+#define CNIC_MODULE_VERSION "2.5.12"
+#define CNIC_MODULE_RELDATE "June 29, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
u32 cid;
};
+struct drv_ctl_register_data {
+ int ulp_type;
+ struct fcoe_capabilities fcoe_features;
+};
+
struct drv_ctl_info {
int cmd;
union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
int ulp_type;
+ struct drv_ctl_register_data register_data;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
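A sketch (handler name and storage are illustrative) of how a netdriver's
drv_ctl callback could consume the new register_data payload:

static struct fcoe_capabilities example_saved_caps;	/* illustrative */

static void example_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	/* Stash the FCoE capabilities passed at ULP registration time. */
	if (info->cmd == DRV_CTL_ULP_REGISTER_CMD &&
	    info->data.register_data.ulp_type == CNIC_ULP_FCOE)
		memcpy(&example_saved_caps,
		       &info->data.register_data.fcoe_features,
		       sizeof(example_saved_caps));
}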
@@ -305,6 +313,7 @@ struct cnic_dev {
int max_rdma_conn;
union drv_info_to_mcp *stats_addr;
+ struct fcoe_capabilities *fcoe_cap;
void *cnic_priv;
};
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e47ff8be1d7..fce4c1e4dd3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,6 +44,10 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#if IS_ENABLED(CONFIG_HWMON)
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#include <net/checksum.h>
#include <net/ip.h>
@@ -298,6 +302,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -730,44 +735,131 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
-static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
- int i;
u32 apedata;
- /* NCSI does not support APE events */
- if (tg3_flag(tp, APE_HAS_NCSI))
- return;
+ while (timeout_us) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return -EBUSY;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ udelay(10);
+ timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
+ }
+
+ return timeout_us ? 0 : -EBUSY;
+}
+
+static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
+{
+ u32 i, apedata;
+
+ for (i = 0; i < timeout_us / 10; i++) {
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(10);
+ }
+
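+	/* Nonzero return means the APE never cleared the pending bit. */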
+ return i == timeout_us / 10;
+}
+
+static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
+				   u32 len)
+{
+ int err;
+ u32 i, bufoff, msgoff, maxlen, apedata;
+
+ if (!tg3_flag(tp, APE_HAS_NCSI))
+ return 0;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
- return;
+ return -ENODEV;
apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
if (!(apedata & APE_FW_STATUS_READY))
- return;
+ return -EAGAIN;
- /* Wait for up to 1 millisecond for APE to service previous event. */
- for (i = 0; i < 10; i++) {
- if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
- return;
+ bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
+ TG3_APE_SHMEM_BASE;
+ msgoff = bufoff + 2 * sizeof(u32);
+ maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
- apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ while (len) {
+ u32 length;
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
- event | APE_EVENT_STATUS_EVENT_PENDING);
+ /* Cap xfer sizes to scratchpad limits. */
+ length = (len > maxlen) ? maxlen : len;
+ len -= length;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 msec for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ apedata = APE_EVENT_STATUS_DRIVER_EVNT |
+ APE_EVENT_STATUS_SCRTCHPD_READ |
+ APE_EVENT_STATUS_EVENT_PENDING;
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
+
+ tg3_ape_write32(tp, bufoff, base_off);
+ tg3_ape_write32(tp, bufoff + sizeof(u32), length);
tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- break;
+ base_off += length;
- udelay(100);
+ if (tg3_ape_wait_for_event(tp, 30000))
+ return -EAGAIN;
+
+ for (i = 0; length; i += 4, length -= 4) {
+ u32 val = tg3_ape_read32(tp, msgoff + i);
+ memcpy(data, &val, sizeof(u32));
+ data++;
+ }
}
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+ return 0;
+}
+
+static int tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int err;
+ u32 apedata;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return -EAGAIN;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+
+ return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
@@ -9393,6 +9485,110 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
+{
+ int i;
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
+ u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
+
+ tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
+ off += len;
+
+ if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
+ !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
+ memset(ocir, 0, TG3_OCIR_LEN);
+ }
+}
+
+/* sysfs attributes for hwmon */
+static ssize_t tg3_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u32 temperature;
+
+ spin_lock_bh(&tp->lock);
+ tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ sizeof(temperature));
+ spin_unlock_bh(&tp->lock);
+ return sprintf(buf, "%u\n", temperature);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_SENSOR_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_CAUTION_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_MAX_OFFSET);
+
+static struct attribute *tg3_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tg3_group = {
+ .attrs = tg3_attributes,
+};
+
+#endif
+
+static void tg3_hwmon_close(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ if (tp->hwmon_dev) {
+ hwmon_device_unregister(tp->hwmon_dev);
+ tp->hwmon_dev = NULL;
+ sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+static void tg3_hwmon_open(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ int i, err;
+ u32 size = 0;
+ struct pci_dev *pdev = tp->pdev;
+ struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
+
+ tg3_sd_scan_scratchpad(tp, ocirs);
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++) {
+ if (!ocirs[i].src_data_length)
+ continue;
+
+ size += ocirs[i].src_hdr_length;
+ size += ocirs[i].src_data_length;
+ }
+
+ if (!size)
+ return;
+
+ /* Register hwmon sysfs hooks */
+ err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
+ return;
+ }
+
+ tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(tp->hwmon_dev)) {
+ tp->hwmon_dev = NULL;
+ dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
+ sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
#define TG3_STAT_ADD32(PSTAT, REG) \
do { u32 __val = tr32(REG); \
(PSTAT)->low += __val; \
@@ -9908,7 +10104,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- tp->irq_cnt = num_online_cpus();
+ tp->irq_cnt = netif_get_num_default_rss_queues();
if (tp->irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
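For context, netif_get_num_default_rss_queues() is the new net/core helper
this hunk switches to; assuming its upstream definition, it simply caps the
default at eight queues:

	/* assumed net/core definition */
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());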
@@ -10101,6 +10297,8 @@ static int tg3_open(struct net_device *dev)
tg3_phy_start(tp);
+ tg3_hwmon_open(tp);
+
tg3_full_lock(tp, 0);
tg3_timer_start(tp);
@@ -10150,6 +10348,8 @@ static int tg3_close(struct net_device *dev)
tg3_timer_stop(tp);
+ tg3_hwmon_close(tp);
+
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
@@ -13857,14 +14057,9 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
}
}
-static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
- int vlen;
u32 apedata;
- char *fwtype;
-
- if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
- return;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
@@ -13874,14 +14069,22 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
if (!(apedata & APE_FW_STATUS_READY))
return;
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+ tg3_flag_set(tp, APE_HAS_NCSI);
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+ int vlen;
+ u32 apedata;
+ char *fwtype;
+
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
- if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
- tg3_flag_set(tp, APE_HAS_NCSI);
+ if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
- } else {
+ else
fwtype = "DASH";
- }
vlen = strlen(tp->fw_ver);
@@ -13915,20 +14118,17 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
tg3_read_sb_ver(tp, val);
else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
tg3_read_hwsb_ver(tp);
- else
- return;
-
- if (vpd_vers)
- goto done;
- if (tg3_flag(tp, ENABLE_APE)) {
- if (tg3_flag(tp, ENABLE_ASF))
- tg3_read_dash_ver(tp);
- } else if (tg3_flag(tp, ENABLE_ASF)) {
- tg3_read_mgmtfw_ver(tp);
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, ENABLE_APE)) {
+ tg3_probe_ncsi(tp);
+ if (!vpd_vers)
+ tg3_read_dash_ver(tp);
+ } else if (!vpd_vers) {
+ tg3_read_mgmtfw_ver(tp);
+ }
}
-done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f899a4..a1b75cd67b9 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2311,10 +2311,11 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
-#define TG3_APE_SEG_SIG 0x4000
-#define APE_SEG_SIG_MAGIC 0x41504521
/* APE shared memory. Accessible through BAR1 */
+#define TG3_APE_SHMEM_BASE 0x4000
+#define TG3_APE_SEG_SIG 0x4000
+#define APE_SEG_SIG_MAGIC 0x41504521
#define TG3_APE_FW_STATUS 0x400c
#define APE_FW_STATUS_READY 0x00000100
#define TG3_APE_FW_FEATURES 0x4010
@@ -2327,6 +2328,8 @@
#define APE_FW_VERSION_REVMSK 0x0000ff00
#define APE_FW_VERSION_REVSFT 8
#define APE_FW_VERSION_BLDMSK 0x000000ff
+#define TG3_APE_SEG_MSG_BUF_OFF 0x401c
+#define TG3_APE_SEG_MSG_BUF_LEN 0x4020
#define TG3_APE_HOST_SEG_SIG 0x4200
#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
#define TG3_APE_HOST_SEG_LEN 0x4204
@@ -2353,6 +2356,8 @@
#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
+#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600
+#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700
#define APE_EVENT_STATUS_STATE_START 0x00010000
#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
#define APE_EVENT_STATUS_STATE_WOL 0x00030000
@@ -2671,6 +2676,40 @@ struct tg3_hw_stats {
u8 __reserved4[0xb00-0x9c8];
};
+#define TG3_SD_NUM_RECS 3
+#define TG3_OCIR_LEN (sizeof(struct tg3_ocir))
+#define TG3_OCIR_SIG_MAGIC 0x5253434f
+#define TG3_OCIR_FLAG_ACTIVE 0x00000001
+
+#define TG3_TEMP_CAUTION_OFFSET 0xc8
+#define TG3_TEMP_MAX_OFFSET 0xcc
+#define TG3_TEMP_SENSOR_OFFSET 0xd4
+
+struct tg3_ocir {
+ u32 signature;
+ u16 version_flags;
+ u16 refresh_int;
+ u32 refresh_tmr;
+ u32 update_tmr;
+ u32 dst_base_addr;
+ u16 src_hdr_offset;
+ u16 src_hdr_length;
+ u16 src_data_offset;
+ u16 src_data_length;
+ u16 dst_hdr_offset;
+ u16 dst_data_offset;
+ u16 dst_reg_upd_offset;
+ u16 dst_sem_offset;
+ u32 reserved1[2];
+ u32 port0_flags;
+ u32 port1_flags;
+ u32 port2_flags;
+ u32 port3_flags;
+ u32 reserved2[1];
+};
+
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better the way we are doing it now.
@@ -3206,6 +3245,10 @@ struct tg3 {
const char *fw_needed;
const struct firmware *fw;
u32 fw_len; /* includes BSS */
+
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_dev;
+#endif
};
#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 689e5e19cc0..550d2521ba7 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
}
/**
- * bfa_cee_attr_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE attributes
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
*/
static u32
bfa_cee_attr_meminfo(void)
@@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void)
return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
}
/**
- * bfa_cee_stats_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE stats
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
*/
static u32
bfa_cee_stats_meminfo(void)
@@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-attributes responses from f/w
- *
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
+ * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
*
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-stats responses from f/w
+ * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w
*
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
- *
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}
/**
- * bfa_nw_cee_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE module
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
*/
u32
bfa_nw_cee_meminfo(void)
@@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void)
}
/**
- * bfa_nw_cee_mem_claim()
- *
- * @brief Initialized CEE DMA Memory
- *
- * @param[in] cee CEE module pointer
- * dma_kva Kernel Virtual Address of CEE DMA Memory
- * dma_pa Physical Address of CEE DMA Memory
+ * bfa_nw_cee_mem_claim - Initialize CEE DMA Memory
*
- * @return void
+ * @cee: CEE module pointer
+ * @dma_kva: Kernel Virtual Address of CEE DMA Memory
+ * @dma_pa: Physical Address of CEE DMA Memory
*/
void
bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
@@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
- * bfa_cee_get_attr()
- *
- * @brief Send the request to the f/w to fetch CEE attributes.
+ * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
*
- * @param[in] Pointer to the CEE module data structure.
+ * @cee: Pointer to the CEE module data structure.
*
- * @return Status
+ * Return: status
*/
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
@@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
}
/**
- * bfa_cee_isrs()
- *
- * @brief Handles Mail-box interrupts for CEE module.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return void
+ * bfa_cee_isr - Handles mailbox interrupts for CEE module.
*/
static void
@@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}
/**
- * bfa_cee_notify()
- *
- * @brief CEE module heart-beat failure handler.
- * @brief CEE module IOC event handler.
- *
- * @param[in] IOC event type
+ * bfa_cee_notify - CEE module IOC event handler.
*
- * @return void
+ * @event: IOC event type
*/
static void
@@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
}
/**
- * bfa_nw_cee_attach()
- *
- * @brief CEE module-attach API
+ * bfa_nw_cee_attach - CEE module-attach API
*
- * @param[in] cee - Pointer to the CEE module data structure
- * ioc - Pointer to the ioc module data structure
- * dev - Pointer to the device driver module data structure
- * The device driver specific mbox ISR functions have
- * this pointer as one of the parameters.
- *
- * @return void
+ * @cee: Pointer to the CEE module data structure
+ * @ioc: Pointer to the ioc module data structure
+ * @dev: Pointer to the device driver module data structure.
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
*/
void
bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a946ccd..ad004a4c389 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -16,23 +16,18 @@
* www.brocade.com
*/
-/**
- * @file bfa_cs.h BFA common services
- */
+/* BFA common services */
#ifndef __BFA_CS_H__
#define __BFA_CS_H__
#include "cna.h"
-/**
- * @ BFA state machine interfaces
- */
+/* BFA state machine interfaces */
typedef void (*bfa_sm_t)(void *sm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - event type, eg. enum ioc_event
@@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-/**
- * For converting from state machine function to state encoding.
- */
+/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
int state; /*!< state machine encoding */
@@ -55,13 +48,10 @@ struct bfa_sm_table {
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
-/**
- * State machine with entry actions.
- */
+/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - event type, eg. enum ioc_event
@@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
return smt[i].state;
}
-/**
- * @ Generic wait counter.
- */
+/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
@@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc)
wc->wc_resume(wc->wc_cbarg);
}
-/**
- * Initialize a waiting counter.
- */
+/* Initialize a waiting counter. */
static inline void
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
@@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
bfa_wc_up(wc);
}
-/**
- * Wait for counter to reach zero
- */
+/* Wait for counter to reach zero */
static inline void
bfa_wc_wait(struct bfa_wc *wc)
{
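Since the wait-counter helpers appear here without a caller, a minimal usage
sketch (caller and callback are hypothetical) under the init/up/down
semantics above:

static void example_resume(void *cbarg)
{
	/* Runs once every outstanding reference has been dropped. */
}

static void example_teardown(struct bfa_wc *wc)
{
	bfa_wc_init(wc, example_resume, NULL);	/* takes one reference */
	bfa_wc_up(wc);		/* one extra reference per async operation */
	/* ... start the async op; its completion calls bfa_wc_down(wc) ... */
	bfa_wc_wait(wc);	/* drop the init reference; resume at zero */
}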
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 48f87733739..e423f82da49 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -26,13 +26,9 @@
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
-/**
- * ---------------------- adapter definitions ------------
- */
+/* ---------------------- adapter definitions ------------ */
-/**
- * BFA adapter level attributes.
- */
+/* BFA adapter level attributes. */
enum {
BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
/*
@@ -74,18 +70,14 @@ struct bfa_adapter_attr {
u8 trunk_capable;
};
-/**
- * ---------------------- IOC definitions ------------
- */
+/* ---------------------- IOC definitions ------------ */
enum {
BFA_IOC_DRIVER_LEN = 16,
BFA_IOC_CHIP_REV_LEN = 8,
};
-/**
- * Driver and firmware versions.
- */
+/* Driver and firmware versions. */
struct bfa_ioc_driver_attr {
char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
@@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr {
char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
};
-/**
- * IOC PCI device attributes
- */
+/* IOC PCI device attributes */
struct bfa_ioc_pci_attr {
u16 vendor_id; /*!< PCI vendor ID */
u16 device_id; /*!< PCI device ID */
@@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr {
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
};
-/**
- * IOC states
- */
+/* IOC states */
enum bfa_ioc_state {
BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
BFA_IOC_RESET = 2, /*!< IOC is in reset state */
@@ -127,9 +115,7 @@ enum bfa_ioc_state {
BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
};
-/**
- * IOC firmware stats
- */
+/* IOC firmware stats */
struct bfa_fw_ioc_stats {
u32 enable_reqs;
u32 disable_reqs;
@@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats {
u32 unknown_reqs;
};
-/**
- * IOC driver stats
- */
+/* IOC driver stats */
struct bfa_ioc_drv_stats {
u32 ioc_isrs;
u32 ioc_enables;
@@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats {
u32 rsvd;
};
-/**
- * IOC statistics
- */
+/* IOC statistics */
struct bfa_ioc_stats {
struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
@@ -171,9 +153,7 @@ enum bfa_ioc_type {
BFA_IOC_TYPE_LL = 3,
};
-/**
- * IOC attributes returned in queries
- */
+/* IOC attributes returned in queries */
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
enum bfa_ioc_state state; /*!< IOC state */
@@ -187,22 +167,16 @@ struct bfa_ioc_attr {
u8 rsvd[4]; /*!< 64bit align */
};
-/**
- * Adapter capability mask definition
- */
+/* Adapter capability mask definition */
enum {
BFA_CM_HBA = 0x01,
BFA_CM_CNA = 0x02,
BFA_CM_NIC = 0x04,
};
-/**
- * ---------------------- mfg definitions ------------
- */
+/* ---------------------- mfg definitions ------------ */
-/**
- * Checksum size
- */
+/* Checksum size */
#define BFA_MFG_CHKSUM_SIZE 16
#define BFA_MFG_PARTNUM_SIZE 14
@@ -213,8 +187,7 @@ enum {
#pragma pack(1)
-/**
- * @brief BFA adapter manufacturing block definition.
+/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
*/
@@ -256,9 +229,7 @@ struct bfa_mfg_block {
#pragma pack()
-/**
- * ---------------------- pci definitions ------------
- */
+/* ---------------------- pci definitions ------------ */
/*
* PCI device ID information
@@ -275,9 +246,7 @@ enum {
#define bfa_asic_id_ctc(device) \
(bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
-/**
- * PCI sub-system device and vendor ID information
- */
+/* PCI sub-system device and vendor ID information */
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
BFA_PCI_CT2_SSID_FCoE = 0x22,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 8ab33ee2c2b..b39c5f23974 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -20,10 +20,7 @@
#include "bfa_defs.h"
-/**
- * @brief
- * FC physical port statistics.
- */
+/* FC physical port statistics. */
struct bfa_port_fc_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 tx_frames; /*!< Tx frames */
@@ -59,10 +56,7 @@ struct bfa_port_fc_stats {
u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
};
-/**
- * @brief
- * Eth Physical Port statistics.
- */
+/* Eth Physical Port statistics. */
struct bfa_port_eth_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 frame_64; /*!< Frames 64 bytes */
@@ -108,10 +102,7 @@ struct bfa_port_eth_stats {
u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
};
-/**
- * @brief
- * Port statistics.
- */
+/* Port statistics. */
union bfa_port_stats_u {
struct bfa_port_fc_stats fc;
struct bfa_port_eth_stats eth;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 6681fe87c1e..7fb396fe679 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -20,33 +20,23 @@
#include "bfa_defs.h"
-/**
- * Manufacturing block version
- */
+/* Manufacturing block version */
#define BFA_MFG_VERSION 3
#define BFA_MFG_VERSION_UNINIT 0xFF
-/**
- * Manufacturing block encrypted version
- */
+/* Manufacturing block encrypted version */
#define BFA_MFG_ENC_VER 2
-/**
- * Manufacturing block version 1 length
- */
+/* Manufacturing block version 1 length */
#define BFA_MFG_VER1_LEN 128
-/**
- * Manufacturing block header length
- */
+/* Manufacturing block header length */
#define BFA_MFG_HDR_LEN 4
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
-/**
- * Manufacturing card type
- */
+/* Manufacturing card type */
enum {
BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
@@ -70,9 +60,7 @@ enum {
#pragma pack(1)
-/**
- * Check if Mezz card
- */
+/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE || \
@@ -127,9 +115,7 @@ do { \
} \
} while (0)
-/**
- * VPD data length
- */
+/* VPD data length */
#define BFA_MFG_VPD_LEN 512
#define BFA_MFG_VPD_LEN_INVALID 0
@@ -137,9 +123,7 @@ do { \
#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
-/**
- * VPD vendor tag
- */
+/* VPD vendor tag */
enum {
BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
@@ -151,8 +135,7 @@ enum {
BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
-/**
- * @brief BFA adapter flash vpd data definition.
+/* BFA adapter flash vpd data definition.
*
* All numerical fields are in big-endian format.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c2e80..ea9af9ae754 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -18,8 +18,7 @@
#ifndef __BFA_DEFS_STATUS_H__
#define __BFA_DEFS_STATUS_H__
-/**
- * API status return values
+/* API status return values
*
* NOTE: The error msgs are auto generated from the comments. Only single line
* comments are supported
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0b640fafbda..959c58ef972 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -20,13 +20,9 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
-/**
- * IOC local definitions
- */
+/* IOC local definitions */
-/**
- * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
- */
+/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
@@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-/**
- * IOC state machine definitions/declarations
- */
+/* IOC state machine definitions/declarations */
enum ioc_event {
IOC_E_RESET = 1, /*!< IOC reset request */
IOC_E_ENABLE = 2, /*!< IOC enable request */
@@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
-/**
- * IOCPF state machine events
- */
+/* IOCPF state machine events */
enum iocpf_event {
IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
@@ -166,9 +158,7 @@ enum iocpf_event {
IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
};
-/**
- * IOCPF states
- */
+/* IOCPF states */
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
@@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
-/**
- * IOC State Machine
- */
+/* IOC State Machine */
-/**
- * Beginning state. IOC uninit state.
- */
+/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC is in uninit state.
- */
+/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
-/**
- * IOC is in reset state.
- */
+/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_enable(ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
@@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
bfa_ioc_send_getattr(ioc);
}
-/**
- * IOC configuration in progress. Timer is active.
- */
+/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_disable(ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
@@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
-/**
- * Hardware initialization retry.
- */
+/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOCPF State Machine
- */
+/* IOCPF State Machine */
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
@@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
iocpf->auto_recover = bfa_nw_auto_recover;
}
-/**
- * Beginning state. IOC is in reset state.
- */
+/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
@@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting h/w semaphore to continue with version check.
- */
+/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Notify enable completion callback
- */
+/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
@@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
msecs_to_jiffies(BFA_IOC_TOV));
}
-/**
- * Awaiting firmware version match.
- */
+/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Request for semaphore.
- */
+/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting semaphore for h/w initialzation.
- */
+/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
bfa_ioc_reset(iocpf->ioc, false);
}
-/**
- * Hardware is being initialized. Interrupts are enabled.
+/* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
static void
@@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_enable(iocpf->ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_disable(iocpf->ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC hb ack request is being removed.
- */
+/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
@@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * @brief
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * BFA IOC private functions
- */
+/* BFA IOC private functions */
-/**
- * Notify common modules registered for notification.
- */
+/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
@@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
del_timer(&ioc->sem_timer);
}
-/**
- * @brief
- * Initialize LPU local memory (aka secondary memory / SRAM)
- */
+/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
@@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
-/**
- * Get driver and firmware versions.
- */
+/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/**
- * Returns TRUE if same.
- */
+/* Returns TRUE if same. */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
return true;
}
-/**
- * Return true if current running version is valid. Firmware signature and
+/* Return true if the currently running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
static bool
@@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
-/**
- * Conditionally flush any pending message from firmware at start.
- */
+/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
@@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc)
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
-/**
- * @img ioc_init_logic.jpg
- */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
@@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
del_timer(&ioc->hb_timer);
}
-/**
- * @brief
- * Initiate a full firmware download.
- */
+/* Initiate a full firmware download. */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
@@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
bfa_ioc_hwinit(ioc, force);
}
-/**
- * BFA ioc enable reply by firmware
- */
+/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
u8 cap_bm)
@@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
-/**
- * @brief
- * Update BFA configuration from firmware configuration.
- */
+/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
@@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
-/**
- * Attach time initialization of mbox logic.
- */
+/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
@@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
}
}
-/**
- * Mbox poll timer -- restarts any pending mailbox requests.
- */
+/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
@@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
}
}
-/**
- * Cleanup any pending requests.
- */
+/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
@@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
}
/**
- * Read data from SMEM to host through PCI memmap
+ * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
*
- * @param[in] ioc memory for IOC
- * @param[in] tbuf app memory to store data from smem
- * @param[in] soff smem offset
- * @param[in] sz size of smem in bytes
+ * @ioc: memory for IOC
+ * @tbuf: app memory to store data from smem
+ * @soff: smem offset
+ * @sz: size of smem in bytes
*/
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
@@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
return 0;
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
return status;
}
-/**
- * Save firmware trace if configured.
- */
+/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
@@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
}
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
bfa_nw_ioc_debug_save_ftrc(ioc);
}
-/**
- * IOCPF to IOC interface
- */
+/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
@@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
-/**
- * IOC public
- */
+/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
@@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
return BFA_STATUS_OK;
}
-/**
- * Interface used by diag module to do firmware boot with memory test
+/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
static void
@@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
bfa_ioc_lpu_start(ioc);
}
-/**
- * Enable/disable IOC failure auto recovery.
- */
+/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
@@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
}
/**
- * IOC attach time initialization and setup.
+ * bfa_nw_ioc_attach - IOC attach time initialization and setup.
*
- * @param[in] ioc memory for IOC
- * @param[in] bfa driver instance structure
+ * @ioc: memory for IOC
+ * @bfa: driver instance structure
*/
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
-/**
- * Driver detach time IOC cleanup.
- */
+/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
@@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
}
/**
- * Setup IOC PCI properties.
+ * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
*
- * @param[in] pcidev PCI device information for this IOC
+ * @pcidev: PCI device information for this IOC
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
}
/**
- * Initialize IOC dma memory
+ * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
*
- * @param[in] dm_kva kernel virtual address of IOC dma memory
- * @param[in] dm_pa physical address of IOC dma memory
+ * @dm_kva: kernel virtual address of IOC dma memory
+ * @dm_pa: physical address of IOC dma memory
*/
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
@@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
-/**
- * Return size of dma memory required.
- */
+/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
@@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
-/**
- * Initialize memory for saving firmware trace.
- */
+/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
@@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
-/**
- * Register mailbox message handler function, to be called by common modules
- */
+/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
@@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
}
/**
- * Queue a mailbox command request to firmware. Waits if mailbox is busy.
- * Responsibility of caller to serialize
+ * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
*
- * @param[in] ioc IOC instance
- * @param[i] cmd Mailbox command
+ * @ioc: IOC instance
+ * @cmd: Mailbox command
+ *
+ * Waits if the mailbox is busy. It is the caller's responsibility to serialize.
*/
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
@@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
return false;
}
-/**
- * Handle mailbox interrupts
- */
+/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
@@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
-/**
- * return true if IOC is disabled
- */
+/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
@@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
-/**
- * return true if IOC is operational
- */
+/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
-/**
- * Add to IOC heartbeat failure notification queue. To be used by common
+/* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
void
@@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
-/**
- * WWN public
- */
+/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
@@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
return ioc->attr->mac;
}
-/**
- * Firmware failure detected. Start recovery actions.
- */
+/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
@@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
-/**
- * @dg hal_iocpf_pvt BFA IOC PF private functions
- * @{
- */
+/* BFA IOC PF private functions */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
@@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
/*
* Send flash write request.
- *
- * @param[in] cbarg - callback argument
*/
static void
bfa_flash_write_send(struct bfa_flash *flash)
@@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash)
flash->offset += len;
}
-/*
- * Send flash read request.
+/**
+ * bfa_flash_read_send - Send flash read request.
*
- * @param[in] cbarg - callback argument
+ * @cbarg: callback argument
*/
static void
bfa_flash_read_send(void *cbarg)
@@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg)
bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
-/*
- * Process flash response messages upon receiving interrupts.
+/**
+ * bfa_flash_intr - Process flash response messages upon receiving interrupts.
*
- * @param[in] flasharg - flash structure
- * @param[in] msg - message structure
+ * @flasharg: flash structure
+ * @msg: message structure
*/
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
@@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void)
return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Flash attach API.
+/**
+ * bfa_nw_flash_attach - Flash attach API.
*
- * @param[in] flash - flash structure
- * @param[in] ioc - ioc structure
- * @param[in] dev - device structure
+ * @flash: flash structure
+ * @ioc: ioc structure
+ * @dev: device structure
*/
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
@@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
-/*
- * Claim memory for flash
+/**
+ * bfa_nw_flash_memclaim - Claim memory for flash
*
- * @param[in] flash - flash structure
- * @param[in] dm_kva - pointer to virtual memory address
- * @param[in] dm_pa - physical memory address
+ * @flash: flash structure
+ * @dm_kva: pointer to virtual memory address
+ * @dm_pa: physical memory address
*/
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
@@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Get flash attribute.
+/**
+ * bfa_nw_flash_get_attr - Get flash attribute.
*
- * @param[in] flash - flash structure
- * @param[in] attr - flash attribute structure
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @attr: flash attribute structure
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
return BFA_STATUS_OK;
}
-/*
- * Update flash partition.
+/**
+ * bfa_nw_flash_update_part - Update flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - update data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: update data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
return BFA_STATUS_OK;
}
-/*
- * Read flash partition.
+/**
+ * bfa_nw_flash_read_part - Read flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - read data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: read data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
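
Taken together, the bfa_ioc.c hunks apply one rule: "/**" is reserved for kernel-doc, so plain explanatory comments drop to ordinary "/* ... */" form, while comments documenting an exported function are promoted to real kernel-doc with a "name - summary" line and "@arg:" descriptions. A minimal sketch of the two forms the patch converges on (the function and names here are hypothetical, not part of the driver):

/* Plain comment: ignored by scripts/kernel-doc. */
static void example_helper(void)
{
}

/**
 * example_mbox_queue - queue one mailbox command to firmware
 * @ioc:	IOC instance
 * @cmd:	command buffer owned by the caller
 *
 * Returns true if the command was written straight to the mailbox
 * register, false if it had to be deferred to the pending queue.
 */
bool example_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd);
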
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 3b4460fdc14..63a85e555df 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -30,9 +30,7 @@
#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
BFI_IOC_TRC_HDR_SZ)
-/**
- * PCI device information required by IOC
- */
+/* PCI device information required by IOC */
struct bfa_pcidev {
int pci_slot;
u8 pci_func;
@@ -41,8 +39,7 @@ struct bfa_pcidev {
void __iomem *pci_bar_kva;
};
-/**
- * Structure used to remember the DMA-able memory block's KVA and Physical
+/* Structure used to remember the DMA-able memory block's KVA and Physical
* Address
*/
struct bfa_dma {
@@ -52,15 +49,11 @@ struct bfa_dma {
#define BFA_DMA_ALIGN_SZ 256
-/**
- * smem size for Crossbow and Catapult
- */
+/* smem size for Crossbow and Catapult */
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
-/**
- * @brief BFA dma address assignment macro. (big endian format)
- */
+/* BFA dma address assignment macro. (big endian format) */
#define bfa_dma_be_addr_set(dma_addr, pa) \
__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
@@ -108,9 +101,7 @@ struct bfa_ioc_regs {
u32 smem_pg0;
};
-/**
- * IOC Mailbox structures
- */
+/* IOC Mailbox structures */
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
struct list_head qe;
@@ -119,9 +110,7 @@ struct bfa_mbox_cmd {
u32 msg[BFI_IOC_MSGSZ];
};
-/**
- * IOC mailbox module
- */
+/* IOC mailbox module */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
struct bfa_ioc_mbox_mod {
struct list_head cmd_q; /*!< pending mbox queue */
@@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod {
} mbhdlr[BFI_MC_MAX];
};
-/**
- * IOC callback function interfaces
- */
+/* IOC callback function interfaces */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
@@ -146,9 +133,7 @@ struct bfa_ioc_cbfn {
bfa_ioc_reset_cbfn_t reset_cbfn;
};
-/**
- * IOC event notification mechanism.
- */
+/* IOC event notification mechanism. */
enum bfa_ioc_event {
BFA_IOC_E_ENABLED = 1,
BFA_IOC_E_DISABLED = 2,
@@ -163,9 +148,7 @@ struct bfa_ioc_notify {
void *cbarg;
};
-/**
- * Initialize a IOC event notification structure
- */
+/* Initialize a IOC event notification structure */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
@@ -261,9 +244,7 @@ struct bfa_ioc_hwif {
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-/**
- * IOC mailbox interface
- */
+/* IOC mailbox interface */
bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
struct bfa_mbox_cmd *cmd,
bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
@@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
-/**
- * IOC interfaces
- */
+/* IOC interfaces */
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index b6b036a143a..5df0b0c68c5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
};
-/**
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
+/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
@@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
ioc->ioc_hwif = &nw_hwif_ct2;
}
-/**
- * Return true if firmware of current driver matches the running firmware.
- */
+/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
@@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
-/**
- * Notify other functions on HB failure.
- */
+/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
@@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
readl(ioc->ioc_regs.alt_ll_halt);
}
-/**
- * Host to LPU mailbox message addresses
- */
+/* Host to LPU mailbox message addresses */
static const struct {
u32 hfn_mbox;
u32 lpu_mbox;
@@ -209,9 +201,7 @@ static const struct {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
+/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
u32 hfn;
u32 lpu;
@@ -222,9 +212,7 @@ static const struct {
{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
+/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
u32 hfn;
u32 lpu;
@@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
-/**
- * Initialize IOC to port mapping.
- */
+/* Initialize IOC to port mapping. */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
@@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
-/**
- * Set interrupt mode for a function: INTX or MSIX
- */
+/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
@@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
return false;
}
-/**
- * MSI-X resource allocation for 1860 with no asic block
- */
+/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT 64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
@@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
-/**
- * Cleanup hw semaphore and usecnt registers
- */
+/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
@@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
@@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
return bfa_ioc_ct_sync_complete(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index dd36427f475..55067d0d25c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -16,9 +16,7 @@
* www.brocade.com
*/
-/**
- * @file bfa_msgq.c MSGQ module source file.
- */
+/* MSGQ module source file. */
#include "bfi.h"
#include "bfa_msgq.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 0d9df695397..1f24c23dc78 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -22,15 +22,11 @@
#pragma pack(1)
-/**
- * BFI FW image type
- */
+/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-/**
- * Msg header common to all msgs
- */
+/* Msg header common to all msgs */
struct bfi_mhdr {
u8 msg_class; /*!< @ref enum bfi_mclass */
u8 msg_id; /*!< msg opcode with in the class */
@@ -65,17 +61,14 @@ struct bfi_mhdr {
#define BFI_I2H_OPCODE_BASE 128
#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
-/**
- ****************************************************************************
+/****************************************************************************
*
* Scatter Gather Element and Page definition
*
****************************************************************************
*/
-/**
- * DMA addresses
- */
+/* DMA addresses */
union bfi_addr_u {
struct {
u32 addr_lo;
@@ -83,9 +76,7 @@ union bfi_addr_u {
} a32;
};
-/**
- * Generic DMA addr-len pair.
- */
+/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
@@ -98,26 +89,20 @@ struct bfi_alen {
#define BFI_LMSG_PL_WSZ \
((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-/**
- * Mailbox message structure
- */
+/* Mailbox message structure */
#define BFI_MBMSG_SZ 7
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
};
-/**
- * Supported PCI function class codes (personality)
- */
+/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
BFI_PCIFN_CLASS_FC = 0x0c04,
BFI_PCIFN_CLASS_ETH = 0x0200,
};
-/**
- * Message Classes
- */
+/* Message Classes */
enum bfi_mclass {
BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
@@ -159,15 +144,12 @@ enum bfi_mclass {
#define BFI_FWBOOT_ENV_OS 0
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
*/
-/**
- * Different asic generations
- */
+/* Different asic generations */
enum bfi_asic_gen {
BFI_ASIC_GEN_CB = 1,
BFI_ASIC_GEN_CT = 2,
@@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
};
-/**
- * BFI_IOC_H2I_GETATTR_REQ message
- */
+/* BFI_IOC_H2I_GETATTR_REQ message */
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
@@ -231,30 +211,22 @@ struct bfi_ioc_attr {
u32 card_type; /*!< card type */
};
-/**
- * BFI_IOC_I2H_GETATTR_REPLY message
- */
+/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
};
-/**
- * Firmware memory page offsets
- */
+/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
#define BFI_IOC_SMEM_PG0_CT (0x180)
-/**
- * Firmware statistic offset
- */
+/* Firmware statistic offset */
#define BFI_IOC_FWSTATS_OFF (0x6B40)
#define BFI_IOC_FWSTATS_SZ (4096)
-/**
- * Firmware trace offset
- */
+/* Firmware trace offset */
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
#define BFI_IOC_TRC_ENT_SZ 16
@@ -299,9 +271,7 @@ struct bfi_ioc_hbeat {
u32 hb_count; /*!< current heart beat count */
};
-/**
- * IOC hardware/firmware state
- */
+/* IOC hardware/firmware state */
enum bfi_ioc_state {
BFI_IOC_UNINIT = 0, /*!< not initialized */
BFI_IOC_INITING = 1, /*!< h/w is being initialized */
@@ -345,9 +315,7 @@ enum {
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
-/**
- * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
- */
+/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
struct bfi_ioc_ctrl_req {
struct bfi_mhdr mh;
u16 clscode;
@@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req {
u32 tv_sec;
};
-/**
- * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
- */
+/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< enable/disable status */
@@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply {
};
#define BFI_IOC_MSGSZ 8
-/**
- * H2I Messages
- */
+/* H2I Messages */
union bfi_ioc_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_req enable_req;
@@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u {
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- * I2H Messages
- */
+/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* MSGQ
*----------------------------------------------------------------------
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabea397..6704a439297 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -37,18 +37,14 @@ enum bfi_port_i2h {
BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
};
-/**
- * Generic REQ type
- */
+/* Generic REQ type */
struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
};
-/**
- * Generic RSP type
- */
+/* Generic RSP type */
struct bfi_port_generic_rsp {
struct bfi_mhdr mh; /*!< common msg header */
u8 status; /*!< port enable status */
@@ -56,44 +52,12 @@ struct bfi_port_generic_rsp {
u32 msgtag; /*!< msgtag for reply */
};
-/**
- * @todo
- * BFI_PORT_H2I_ENABLE_REQ
- */
-
-/**
- * @todo
- * BFI_PORT_I2H_ENABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_DISABLE_REQ
- */
-
-/**
- * BFI_PORT_I2H_DISABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_GET_STATS_REQ
- */
+/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
};
-/**
- * BFI_PORT_I2H_GET_STATS_RSP
- */
-
-/**
- * BFI_PORT_H2I_CLEAR_STATS_REQ
- */
-
-/**
- * BFI_PORT_I2H_CLEAR_STATS_RSP
- */
-
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_port_generic_req enable_req;
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index a90f1cf46b4..eef6e1f8aec 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -16,12 +16,9 @@
* www.brocade.com
*/
-/**
- * @file bfi_enet.h BNA Hardware and Firmware Interface
- */
+/* BNA Hardware and Firmware Interface */
-/**
- * Skipping statistics collection to avoid clutter.
+/* Skipping statistics collection to avoid clutter.
* Command is no longer needed:
* MTU
* TxQ Stop
@@ -64,9 +61,7 @@ union bfi_addr_be_u {
} a32;
};
-/**
- * T X Q U E U E D E F I N E S
- */
+/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
/* TxQ Entry Opcodes */
#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
union bfi_addr_be_u addr;
};
-/**
- * TxQ Entry Structure
- *
- */
+/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
@@ -124,16 +116,12 @@ struct bfi_enet_txq_entry {
#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
-/**
- * R X Q U E U E D E F I N E S
- */
+/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
};
-/**
- * R X C O M P L E T I O N Q U E U E D E F I N E S
- */
+/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
@@ -174,9 +162,7 @@ struct bfi_enet_cq_entry {
u8 rxq_id;
};
-/**
- * E N E T C O N T R O L P A T H C O M M A N D S
- */
+/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
union bfi_addr_u pg_tbl;
union bfi_addr_u first_entry;
@@ -222,9 +208,7 @@ struct bfi_enet_ib {
u16 rsvd;
};
-/**
- * ENET command messages
- */
+/* ENET command messages */
enum bfi_enet_h2i_msgs {
/* Rx Commands */
BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
@@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs {
BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
};
-/**
- * The following error codes can be returned by the enet commands
- */
+/* The following error codes can be returned by the enet commands */
enum bfi_enet_err {
BFI_ENET_CMD_OK = 0,
BFI_ENET_CMD_FAIL = 1,
@@ -364,8 +346,7 @@ enum bfi_enet_err {
BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
};
-/**
- * Generic Request
+/* Generic Request
*
* bfi_enet_req is used by:
* BFI_ENET_H2I_RX_CFG_CLR_REQ
@@ -375,8 +356,7 @@ struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * Enable/Disable Request
+/* Enable/Disable Request
*
* bfi_enet_enable_req is used by:
* BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
@@ -391,9 +371,7 @@ struct bfi_enet_enable_req {
u8 rsvd[3];
};
-/**
- * Generic Response
- */
+/* Generic Response */
struct bfi_enet_rsp {
struct bfi_msgq_mhdr mh;
u8 error; /*!< if error see cmd_offset */
@@ -401,20 +379,16 @@ struct bfi_enet_rsp {
u16 cmd_offset; /*!< offset to invalid parameter */
};
-/**
- * GLOBAL CONFIGURATION
- */
+/* GLOBAL CONFIGURATION */
-/**
- * bfi_enet_attr_req is used by:
+/* bfi_enet_attr_req is used by:
* BFI_ENET_H2I_GET_ATTR_REQ
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * bfi_enet_attr_rsp is used by:
+/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
*/
struct bfi_enet_attr_rsp {
@@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp {
u32 rit_size;
};
-/**
- * Tx Configuration
+/* Tx Configuration
*
* bfi_enet_tx_cfg is used by:
* BFI_ENET_H2I_TX_CFG_SET_REQ
@@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp {
} q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
-/**
- * Rx Configuration
+/* Rx Configuration
*
* bfi_enet_rx_cfg is used by:
* BFI_ENET_H2I_RX_CFG_SET_REQ
@@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp {
} q_handles[BFI_ENET_RX_QSET_MAX];
};
-/**
- * RIT
+/* RIT
*
* bfi_enet_rit_req is used by:
* BFI_ENET_H2I_RIT_CFG_REQ
@@ -566,8 +537,7 @@ struct bfi_enet_rit_req {
u8 table[BFI_ENET_RSS_RIT_MAX];
};
-/**
- * RSS
+/* RSS
*
* bfi_enet_rss_cfg_req is used by:
* BFI_ENET_H2I_RSS_CFG_REQ
@@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req {
struct bfi_enet_rss_cfg cfg;
};
-/**
- * MAC Unicast
+/* MAC Unicast
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_MAC_UCAST_SET_REQ
@@ -606,17 +575,14 @@ struct bfi_enet_ucast_req {
u8 rsvd[2];
};
-/**
- * MAC Unicast + VLAN
- */
+/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
mac_t mac_addr;
};
-/**
- * MAC Multicast
+/* MAC Multicast
*
* bfi_enet_mac_mfilter_add_req is used by:
* BFI_ENET_H2I_MAC_MCAST_ADD_REQ
@@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req {
u8 rsvd[2];
};
-/**
- * bfi_enet_mac_mfilter_add_rsp is used by:
+/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
*/
struct bfi_enet_mcast_add_rsp {
@@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp {
u8 rsvd1[2];
};
-/**
- * bfi_enet_mac_mfilter_del_req is used by:
+/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
*/
struct bfi_enet_mcast_del_req {
@@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req {
u8 rsvd[2];
};
-/**
- * VLAN
+/* VLAN
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_RX_VLAN_SET_REQ
@@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req {
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
};
-/**
- * PAUSE
+/* PAUSE
*
* bfi_enet_set_pause_req is used by:
* BFI_ENET_H2I_SET_PAUSE_REQ
@@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req {
u8 rx_pause; /* 1 = enable; 0 = disable */
};
-/**
- * DIAGNOSTICS
+/* DIAGNOSTICS
*
* bfi_enet_diag_lb_req is used by:
* BFI_ENET_H2I_DIAG_LOOPBACK
@@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req {
u8 enable; /* 1 = enable; 0 = disable */
};
-/**
- * enum for Loopback opmodes
- */
+/* enum for Loopback opmodes */
enum {
BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
};
-/**
- * STATISTICS
+/* STATISTICS
*
* bfi_enet_stats_req is used by:
* BFI_ENET_H2I_STATS_GET_REQ
@@ -713,9 +671,7 @@ struct bfi_enet_stats_req {
union bfi_addr_u host_buffer;
};
-/**
- * defines for "stats_mask" above.
- */
+/* defines for "stats_mask" above. */
#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
@@ -881,8 +837,7 @@ struct bfi_enet_stats_mac {
u64 tx_fragments;
};
-/**
- * Complete statistics, DMAed from fw to host followed by
+/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
*/
struct bfi_enet_stats {
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index 0e094fe46df..c49fa312ddb 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -221,9 +221,7 @@ enum {
#define __PMM_1T_RESET_P 0x00000001
#define PMM_1T_RESET_REG_P1 0x00023c1c
-/**
- * Brocade 1860 Adapter specific defines
- */
+/* Brocade 1860 Adapter specific defines */
#define CT2_PCI_CPQ_BASE 0x00030000
#define CT2_PCI_APP_BASE 0x00030100
#define CT2_PCI_ETH_BASE 0x00030400
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 4d7a5de08e1..ede532b4e9d 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -25,11 +25,7 @@
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
-/**
- *
- * Macros and constants
- *
- */
+/* Macros and constants */
#define BNA_IOC_TIMER_FREQ 200
@@ -356,11 +352,7 @@ do { \
} \
} while (0)
-/**
- *
- * Inline functions
- *
- */
+/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
@@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
-/**
- *
- * Function prototypes
- *
- */
+/* Function prototypes */
-/**
- * BNA
- */
+/* BNA */
/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
@@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
-/**
- * MBOX
- */
+/* MBOX */
/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
-/**
- * ETHPORT
- */
+/* ETHPORT */
/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
-/**
- * TX MODULE AND TX
- */
+/* TX MODULE AND TX */
+
/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
struct bfi_msgq_mhdr *msghdr);
@@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
-/**
- * RX MODULE, RX, RXF
- */
+/* RX MODULE, RX, RXF */
/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
@@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-/**
- * ENET
- */
+/* ENET */
/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);
@@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
-/**
- * IOCETH
- */
+/* IOCETH */
/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
enum bna_cleanup_type type);
-/**
- * BNAD
- */
+/* BNAD */
/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 9ccc586e376..db14f69d63b 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
}
}
-/**
- * ETHPORT
- */
+/* ETHPORT */
+
#define call_ethport_stop_cbfn(_ethport) \
do { \
if ((_ethport)->stop_cbfn) { \
@@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
}
}
-/**
- * ENET
- */
+/* ENET */
+
#define bna_enet_chld_start(enet) \
do { \
enum bna_tx_type tx_type = \
@@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
-/**
- * IOCETH
- */
+/* IOCETH */
+
#define enable_mbox_intr(_ioceth) \
do { \
u32 intr_status; \
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 4c6aab2a953..b8c4e21fbf4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -16,20 +16,15 @@
* www.brocade.com
*/
-/**
- * File for interrupt macros and functions
- */
+/* File for interrupt macros and functions */
#ifndef __BNA_HW_DEFS_H__
#define __BNA_HW_DEFS_H__
#include "bfi_reg.h"
-/**
- *
- * SW imposed limits
- *
- */
+/* SW imposed limits */
+
#define BFI_ENET_DEF_TXQ 1
#define BFI_ENET_DEF_RXP 1
#define BFI_ENET_DEF_UCAM 1
@@ -141,11 +136,8 @@
}
#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
-/**
- *
- * Interrupt related bits, flags and macros
- *
- */
+
+/* Interrupt related bits, flags and macros */
#define IB_STATUS_BITS 0x0000ffff
@@ -280,11 +272,7 @@ do { \
(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
(_rcb)->q_dbell));
-/**
- *
- * TxQ, RxQ, CQ related bits, offsets, macros
- *
- */
+/* TxQ, RxQ, CQ related bits, offsets, macros */
/* TxQ Entry Opcodes */
#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -334,11 +322,7 @@ do { \
#define BNA_CQ_EF_LOCAL (1 << 20)
-/**
- *
- * Data structures
- *
- */
+/* Data structures */
struct bna_reg_offset {
u32 fn_int_status;
@@ -371,8 +355,7 @@ struct bna_txq_wi_vector {
struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
};
-/**
- * TxQ Entry Structure
+/* TxQ Entry Structure
*
 * BEWARE: Load values into this structure with correct endianness.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 276fcb589f4..71144b396e0 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -18,9 +18,7 @@
#include "bna.h"
#include "bfi.h"
-/**
- * IB
- */
+/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
@@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
(u32)ib->coalescing_timeo, 0);
}
-/**
- * RXF
- */
+/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
@@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/**
- * RX
- */
+/* RX */
#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
(qcfg)->num_paths : ((qcfg)->num_paths * 2))
@@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{1, 2},
};
-/**
- * TX
- */
+/* TX */
+
#define call_tx_stop_cbfn(tx) \
do { \
if ((tx)->stop_cbfn) { \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e8d3ab7ea6c..d3eb8bddfb2 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -23,11 +23,7 @@
#include "bfa_cee.h"
#include "bfa_msgq.h"
-/**
- *
- * Forward declarations
- *
- */
+/* Forward declarations */
struct bna_mcam_handle;
struct bna_txq;
@@ -40,11 +36,7 @@ struct bna_enet;
struct bna;
struct bnad;
-/**
- *
- * Enums, primitive data types
- *
- */
+/* Enums, primitive data types */
enum bna_status {
BNA_STATUS_T_DISABLED = 0,
@@ -331,11 +323,7 @@ struct bna_attr {
int max_rit_size;
};
-/**
- *
- * IOCEth
- *
- */
+/* IOCEth */
struct bna_ioceth {
bfa_fsm_t fsm;
@@ -351,11 +339,7 @@ struct bna_ioceth {
struct bna *bna;
};
-/**
- *
- * Enet
- *
- */
+/* Enet */
/* Pause configuration */
struct bna_pause_config {
@@ -390,11 +374,7 @@ struct bna_enet {
struct bna *bna;
};
-/**
- *
- * Ethport
- *
- */
+/* Ethport */
struct bna_ethport {
bfa_fsm_t fsm;
@@ -419,11 +399,7 @@ struct bna_ethport {
struct bna *bna;
};
-/**
- *
- * Interrupt Block
- *
- */
+/* Interrupt Block */
/* Doorbell structure */
struct bna_ib_dbell {
@@ -447,11 +423,7 @@ struct bna_ib {
int interpkt_timeo;
};
-/**
- *
- * Tx object
- *
- */
+/* Tx object */
/* Tx datapath control structure */
#define BNA_Q_NAME_SIZE 16
@@ -585,11 +557,7 @@ struct bna_tx_mod {
struct bna *bna;
};
-/**
- *
- * Rx object
- *
- */
+/* Rx object */
/* Rx datapath control structure */
struct bna_rcb {
@@ -898,11 +866,7 @@ struct bna_rx_mod {
u32 rid_mask;
};
-/**
- *
- * CAM
- *
- */
+/* CAM */
struct bna_ucam_mod {
struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
@@ -927,11 +891,7 @@ struct bna_mcam_mod {
struct bna *bna;
};
-/**
- *
- * Statistics
- *
- */
+/* Statistics */
struct bna_stats {
struct bna_dma_addr hw_stats_dma;
@@ -949,11 +909,7 @@ struct bna_stats_mod {
struct bfi_enet_stats_req stats_clr;
};
-/**
- *
- * BNA
- *
- */
+/* BNA */
struct bna {
struct bna_ident ident;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 67cd2ed0306..b441f33258e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
return 0;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Tx MSIX vector(s) from the kernel
*/
static void
@@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
@@ -1354,8 +1352,7 @@ err_return:
return -1;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Rx MSIX vector(s) from the kernel
*/
static void
@@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 72742be1127..d7833922475 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);
-/**
- * MACROS
- */
+/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr) \
(((_bnad)->stats.drv_stats._ctr)++)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index cfc22a64157..6a68e8d9330 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
switch (asic_gen) {
case BFI_ASIC_GEN_CT:
- return (u32 *)(bfi_image_ct_cna + off);
+ return (bfi_image_ct_cna + off);
break;
case BFI_ASIC_GEN_CT2:
- return (u32 *)(bfi_image_ct2_cna + off);
+ return (bfi_image_ct2_cna + off);
break;
default:
return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 1466bc4e3dd..033064b7b57 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -179,13 +179,16 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
if (status_change) {
- if (phydev->link)
+ if (phydev->link) {
+ netif_carrier_on(dev);
netdev_info(dev, "link up (%d/%s)\n",
phydev->speed,
phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
- else
+ } else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
+ }
}
}
@@ -1033,6 +1036,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
+ /* carrier starts down */
+ netif_carrier_off(dev);
+
	/* if the phy is not yet registered, retry later */
if (!bp->phy_dev)
return -EAGAIN;
@@ -1406,6 +1412,8 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
+ netif_carrier_off(dev);
+
netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
dev->irq, dev->dev_addr);
@@ -1469,6 +1477,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
+ netif_carrier_off(netdev);
netif_device_detach(netdev);
clk_disable(bp->hclk);
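
The macb hunks above tie the net-core carrier state to the PHY: the carrier starts off at probe/open/suspend and is only raised or dropped from the link-change handler, so the stack's TX watchdog and userspace see the real link state. A minimal sketch of that pattern, assuming a phylib-driven MAC (everything except the netif_carrier_*() and netdev_info() helpers is illustrative):

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Called by phylib whenever the PHY's link state changes. */
static void example_handle_link_change(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;  /* assumes the driver stored it here */

	if (phydev->link) {
		netif_carrier_on(dev);	/* restarts the TX queue watchdog */
		netdev_info(dev, "link up (%d/%s)\n", phydev->speed,
			    phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
	} else {
		netif_carrier_off(dev);	/* quiesces watchdog, notifies the stack */
		netdev_info(dev, "link down\n");
	}
}
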
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f6131..2b4b4f529ab 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -264,7 +264,7 @@
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */
+#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
/* XGMAC HW Features Register */
@@ -671,26 +671,23 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
- if (priv->rx_skbuff[entry] != NULL)
- continue;
-
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
- if (unlikely(skb == NULL))
- break;
-
- priv->rx_skbuff[entry] = skb;
- paddr = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ if (priv->rx_skbuff[entry] == NULL) {
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ }
netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
priv->rx_head, priv->rx_tail);
priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
- /* Ensure descriptor is in memory before handing to h/w */
- wmb();
desc_set_rx_owner(p);
}
}
@@ -933,6 +930,7 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
priv->tx_tail = 0;
priv->tx_head = 0;
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
@@ -972,7 +970,7 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
- writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
XGMAC_CONTROL_CAR;
@@ -984,7 +982,8 @@ static int xgmac_hw_init(struct net_device *dev)
writel(value, ioaddr + XGMAC_DMA_CONTROL);
/* Set the HW DMA mode and the COE */
- writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
+ XGMAC_OMR_RTC_256,
ioaddr + XGMAC_OMR);
/* Reset the MMC counters */
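
Two things happen in the xgmac hunks above: the refill loop now advances the ring head and re-arms ownership even for entries that already hold an skb, and the TX error path re-programs the DMA base register after resetting head/tail so hardware and driver agree on descriptor zero. The ownership hand-off itself follows the usual DMA-ring discipline, sketched below with purely illustrative names; the firm requirement is only that the buffer fields are globally visible before the OWN bit flips, whether the barrier sits in the caller or inside a desc_set_rx_owner()-style helper.

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#define EXAMPLE_DESC_OWN	cpu_to_le32(1U << 31)

struct example_desc {
	__le32 buf_addr;
	__le32 buf_len;
	__le32 flags;
};

/* Hand one RX descriptor to the hardware (32-bit DMA address sketch). */
static void example_arm_rx_desc(struct example_desc *p, u32 paddr, u32 len)
{
	p->buf_addr = cpu_to_le32(paddr);
	p->buf_len = cpu_to_le32(len);
	wmb();			/* buffer setup must be visible before OWN */
	p->flags |= EXAMPLE_DESC_OWN;
}
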
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index abb6ce7c1b7..6505070abcf 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = {
static void set_nqsets(struct adapter *adap)
{
int i, j = 0;
- int num_cpus = num_online_cpus();
+ int num_cpus = netif_get_num_default_rss_queues();
int hwports = adap->params.nports;
int nqsets = adap->msix_nvectors - 1;
@@ -3173,6 +3173,9 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
pi->iscsic.mac_addr[3] |= 0x80;
}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int __devinit init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3293,6 +3296,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ netdev->vlan_features |= netdev->features & VLAN_FEAT;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
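
The set_nqsets() hunk above (and the matching cfg_queues() change in cxgb4 further down) stops sizing RX queue sets by raw num_online_cpus(): netif_get_num_default_rss_queues() applies a kernel-wide sane default, so a 64-core host no longer demands 64 queues per port. A sketch of the capping idiom, with the hardware limit parameter invented for illustration:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int example_pick_num_queues(int hw_max_queues)
{
	/* min(num_online_cpus(), kernel default cap), then clamp to HW */
	int nq = min(hw_max_queues, netif_get_num_default_rss_queues());

	return nq > 0 ? nq : 1;		/* always keep at least one queue */
}
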
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 65e4b280619..2dbbcbb450d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -575,7 +577,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
if (!skb) {
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p;
+ td->tid_release_list = p;
break;
}
mk_tid_release(skb, p - td->tid_maps.tid_tab);
@@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
+ cxgb_redirect(nr->old, nr->old_neigh,
+ nr->new, nr->new_neigh,
+ nr->daddr);
+ cxgb_neigh_update(nr->new_neigh);
break;
}
default:
@@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr)
{
struct net_device *olddev, *newdev;
- struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- n = dst_get_neighbour_noref(old);
- if (!n)
- return;
- olddev = n->dev;
-
- n = dst_get_neighbour_noref(new);
- if (!n)
- return;
- newdev = n->dev;
+ olddev = old_neigh->dev;
+ newdev = new_neigh->dev;
if (!is_offloading(olddev))
return;
@@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev);
+ e = t3_l2t_get(tdev, new, newdev, daddr);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 3fa3c8833ed..8d53438638b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev)
+ struct net_device *dev, const void *daddr)
{
struct l2t_entry *e = NULL;
struct neighbour *neigh;
@@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
int smt_idx;
rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ neigh = dst_neigh_lookup(dst, daddr);
if (!neigh)
goto done_rcu;
@@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
+ if (neigh)
+ neigh_release(neigh);
rcu_read_unlock();
return e;
}
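
The t3_l2t_get() hunks replace the reference-free dst_get_neighbour_noref() with dst_neigh_lookup(), which returns the neighbour with a reference held; the neigh_release() added on the exit path pairs with that lookup. The discipline, sketched below (the work performed while the reference is held is illustrative):

#include <linux/errno.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int example_use_neigh(struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst_neigh_lookup(dst, daddr);

	if (!n)
		return -ENOENT;

	/* ... safe to use n->dev, n->ha, etc. while the ref is held ... */

	neigh_release(n);	/* drop the reference dst_neigh_lookup() took */
	return 0;
}
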
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c4e86436975..8cffcdfd567 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev);
+ struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cfb60e1f51d..dd901c5061b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data)
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
-/*
+/**
* sge_timer_rx - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f40b64..bff8a3cdd3d 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
return 0;
}
-/*
+/**
* t3_load_fw - download firmware
* @adapter: the adapter
* @fw_data: the firmware image to write
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e1f96fbb48c..5ed49af23d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap)
*/
if (n10g)
q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
- if (q10g > num_online_cpus())
- q10g = num_online_cpus();
+ if (q10g > netif_get_num_default_rss_queues())
+ q10g = netif_get_num_default_rss_queues();
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
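
netif_get_num_default_rss_queues() bounds the default (at the time, min(8, num_online_cpus())), so many-core machines stop allocating one 10G queue set per CPU. The clamp written as the equivalent one-liner, a sketch rather than the patch's exact form:

	q10g = min_t(int, q10g, netif_get_num_default_rss_queues());
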
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e111d974afd..8596acaa402 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -753,7 +753,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
end = (void *)q->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
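
In both write_sgl() variants 'end' is already declared u64 *, so the removed cast was dead weight. The pad itself exists because the hardware requires each SGL to occupy a multiple of 16 bytes while u64 writes advance in 8-byte steps; the idiom as a standalone helper (hypothetical name):

	static inline void sgl_pad_to_16(u64 *end)
	{
		/* 'end' points one past the last 8-byte word written; if that
		 * address is an odd multiple of 8, one zero u64 pads the SGL
		 * out to a 16-byte boundary.
		 */
		if ((uintptr_t)end & 8)
			*end = 0;
	}
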
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32e1dd566a1..fa947dfa4c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/*
+/**
* t4_mem_win_read_len - read memory through PCIE memory window
* @adap: the adapter
* @addr: address of first byte requested aligned on 32b.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 25e3308fc9d..9dad56101e2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -418,7 +418,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
* restart a TX Ethernet Queue which was stopped for lack of
* free TX Queue Descriptors ...
*/
- const struct cpl_sge_egr_update *p = (void *)cpl;
+ const struct cpl_sge_egr_update *p = cpl;
unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0bd585bba39..f2d1ecdcaf9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -934,7 +934,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
end = (void *)tq->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
@@ -1323,8 +1323,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc;
- end = (void *)((void *)tq->desc +
- ((void *)end - (void *)tq->stat));
+ end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
}
write_sgl(skb, tq, sgl, end, 0, addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8132c785cea..ad1468b3ab9 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,8 +1300,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
}
- skb->dev = netdev;
-
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index d3cd489d11a..f879e922484 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3973,7 +3973,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
tmp = srom_rd(aprom_addr, i);
*p++ = cpu_to_le16(tmp);
}
- de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ de4x5_dbg_srom(&lp->srom);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e83bd..d266c86a53f 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.2.220u"
+#define DRV_VER "4.4.31.0u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -389,6 +389,7 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ struct delayed_work func_recovery_work;
u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
@@ -396,9 +397,10 @@ struct be_adapter {
u32 *pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
- bool eeh_err;
- bool ue_detected;
+ bool eeh_error;
bool fw_timeout;
+ bool hw_error;
+
u32 port_num;
bool promiscuous;
u32 function_mode;
@@ -435,6 +437,7 @@ struct be_adapter {
u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
u32 uc_macs; /* Count of secondary UC MAC programmed */
u32 msg_enable;
+ int be_get_temp_freq;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -454,6 +457,9 @@ struct be_adapter {
#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
(adapter->pdev->device == OC_DEVICE_ID4))
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
+
+
#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
(adapter->function_mode & RDMA_ENABLED))
@@ -573,6 +579,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -593,7 +604,19 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
static inline bool be_error(struct be_adapter *adapter)
{
- return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+ return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+}
+
+static inline bool be_crit_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_error || adapter->hw_error;
+}
+
+static inline void be_clear_all_error(struct be_adapter *adapter)
+{
+ adapter->eeh_error = false;
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
}
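
The eeh_err/ue_detected pair collapses into eeh_error/hw_error, and the new helpers give error handling one vocabulary: be_error() for "do not touch the hardware", be_crit_error() for the fatal subset, be_clear_all_error() for recovery paths. A sketch of the intended gating, with a hypothetical caller:

	static int example_hw_op(struct be_adapter *adapter)
	{
		if (be_error(adapter))	/* EEH, UE or FW timeout pending */
			return -EIO;
		/* ... safe to ring doorbells / issue commands ... */
		return 0;
	}
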
static inline bool be_is_wol_excluded(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 921c2082af4..7fac97b4bb5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,9 +19,6 @@
#include "be.h"
#include "be_cmds.h"
-/* Must be a power of 2 or else MODULO will BUG_ON */
-static int be_get_temp_freq = 64;
-
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
@@ -115,7 +112,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
}
} else {
if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
- be_get_temp_freq = 0;
+ adapter->be_get_temp_freq = 0;
if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
compl_status == MCC_STATUS_ILLEGAL_REQUEST)
@@ -144,6 +141,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
+ /* Ignore physical link event */
+ if (lancer_chip(adapter) &&
+ !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
+ return;
+
/* For the initial link status do not rely on the ASYNC event as
* it may not be received in some cases.
*/
@@ -352,7 +354,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
adapter->fw_timeout = true;
- be_detect_dump_ue(adapter);
+ be_detect_error(adapter);
return -1;
}
@@ -429,12 +431,65 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
return 0;
}
-int be_cmd_POST(struct be_adapter *adapter)
+int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 30
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(1000);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+			/* check whether the adapter has corrected the error */

+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
+int be_fw_wait_ready(struct be_adapter *adapter)
{
u16 stage;
int status, timeout = 0;
struct device *dev = &adapter->pdev->dev;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ return status;
+ }
+
do {
status = be_POST_stage_get(adapter, &stage);
if (status) {
@@ -565,6 +620,9 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -592,6 +650,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -610,6 +671,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay)
{
@@ -1132,7 +1194,7 @@ err:
* Uses MCCQ
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
+ u32 *if_handle, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -1152,17 +1214,13 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
- if (mac)
- memcpy(req->mac_addr, mac, ETH_ALEN);
- else
- req->pmac_invalid = true;
+
+ req->pmac_invalid = true;
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- if (mac)
- *pmac_id = le32_to_cpu(resp->pmac_id);
}
err:
@@ -1210,9 +1268,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1581,7 +1636,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
*/
- req->if_flags_mask |=
+ if (!lancer_chip(adapter) || be_physfn(adapter))
+ req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
@@ -1692,6 +1748,20 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in non recoverable error\n");
+ }
+ return status;
+ }
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1728,6 +1798,13 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+
+ if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+ req->hdr.version = 1;
+ req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6);
+ }
+
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
memcpy(req->cpu_table, rsstable, table_size);
memcpy(req->hash, myhash, sizeof(myhash));
@@ -1805,8 +1882,9 @@ err:
}
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_written, u8 *addn_status)
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_write_object *req;
@@ -1862,10 +1940,12 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
status = adapter->flash_status;
resp = embedded_payload(wrb);
- if (!status)
+ if (!status) {
*data_written = le32_to_cpu(resp->actual_write_len);
- else
+ *change_status = resp->change_status;
+ } else {
*addn_status = resp->additional_status;
+ }
return status;
@@ -2330,8 +2410,8 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac)
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2376,8 +2456,9 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
get_mac_list_cmd.va;
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
- * or one or more pseudo permanant mac addresses. If an active
- * mac_id is present, return first active mac_id found
+	 * or one or more true or pseudo permanent mac addresses.
+ * If an active mac_id is present, return first active mac_id
+ * found.
*/
for (i = 0; i < mac_count; i++) {
struct get_list_macaddr *mac_entry;
@@ -2396,7 +2477,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
goto out;
}
}
- /* If no active mac_id found, return first pseudo mac addr */
+ /* If no active mac_id found, return first mac addr */
*pmac_id_active = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
@@ -2648,6 +2729,44 @@ err:
return status;
}
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_port_name *req;
+ int status;
+
+ if (!lancer_chip(adapter)) {
+ *port_name = adapter->hba_port_num + '0';
+ return 0;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
+ NULL);
+ req->hdr.version = 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+ *port_name = resp->port_name[adapter->hba_port_num];
+ } else {
+ *port_name = adapter->hba_port_num + '0';
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
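
be_cmd_query_port_name() is another instance of the driver's MCC boilerplate: lock, reserve a WRB, fill the request header, fire-and-wait, parse the embedded response. A condensed sketch of that skeleton (the opcode and output below are hypothetical):

	static int example_mcc_cmd(struct be_adapter *adapter, u8 *out)
	{
		struct be_mcc_wrb *wrb;
		struct be_cmd_req_hdr *req;
		int status;

		spin_lock_bh(&adapter->mcc_lock);
		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = embedded_payload(wrb);
		be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
				       OPCODE_EXAMPLE /* hypothetical */,
				       sizeof(*req), wrb, NULL);
		status = be_mcc_notify_wait(adapter);
		if (!status)
			*out = 0;	/* parse embedded_payload(wrb) here */
	err:
		spin_unlock_bh(&adapter->mcc_lock);
		return status;
	}
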
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b3f3fc3d132..250f19b5f7b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -93,6 +93,7 @@ enum {
LINK_UP = 0x1
};
#define LINK_STATUS_MASK 0x1
+#define LOGICAL_LINK_STATUS_MASK 0x2
/* When the event code of an async trailer is link-state, the mcc_compl
* must be interpreted as follows
@@ -186,6 +187,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1081,13 +1083,25 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/******************** RSS Config *******************/
-/* RSS types */
+/******************** RSS Config ****************************************/
+/* RSS type Input parameters used to compute RX hash
+ * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
+ * RSS_ENABLE_TCP_IPV4 SRC IPv4, DST IPv4, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_IPV6 SRC IPv6, DST IPv6
+ * RSS_ENABLE_TCP_IPV6 SRC IPv6, DST IPv6, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_UDP_IPV4 SRC IPv4, DST IPv4, UDP SRC PORT, UDP DST PORT
+ * RSS_ENABLE_UDP_IPV6 SRC IPv6, DST IPv6, UDP SRC PORT, UDP DST PORT
+ *
+ * When multiple RSS types are enabled, HW picks the best hash policy
+ * based on the type of the received packet.
+ */
#define RSS_ENABLE_NONE 0x0
#define RSS_ENABLE_IPV4 0x1
#define RSS_ENABLE_TCP_IPV4 0x2
#define RSS_ENABLE_IPV6 0x4
#define RSS_ENABLE_TCP_IPV6 0x8
+#define RSS_ENABLE_UDP_IPV4 0x10
+#define RSS_ENABLE_UDP_IPV6 0x20
struct be_cmd_req_rss_config {
struct be_cmd_req_hdr hdr;
@@ -1163,6 +1177,8 @@ struct lancer_cmd_req_write_object {
u32 addr_high;
};
+#define LANCER_NO_RESET_NEEDED 0x00
+#define LANCER_FW_RESET_NEEDED 0x02
struct lancer_cmd_resp_write_object {
u8 opcode;
u8 subsystem;
@@ -1173,6 +1189,8 @@ struct lancer_cmd_resp_write_object {
u32 resp_len;
u32 actual_resp_len;
u32 actual_write_len;
+ u8 change_status;
+ u8 rsvd3[3];
};
/************************ Lancer Read FW info **************/
@@ -1502,6 +1520,17 @@ struct be_cmd_resp_get_hsw_config {
u32 rsvd;
};
+/******************* get port names ***************/
+struct be_cmd_req_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+};
+
+struct be_cmd_resp_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u8 port_name[4];
+};
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1656,7 +1685,7 @@ struct be_cmd_req_set_ext_fat_caps {
};
extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_cmd_POST(struct be_adapter *adapter);
+extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
@@ -1664,8 +1693,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
- u32 domain);
+ u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1719,10 +1747,11 @@ extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *addn_status);
+ struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name,
+ u32 *data_written, u8 *change_status,
+ u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
@@ -1745,14 +1774,15 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
extern int be_cmd_get_phy_info(struct be_adapter *adapter);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern void be_detect_error(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id,
+ u8 domain);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
@@ -1765,4 +1795,7 @@ extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd,
struct be_fat_conf_params *cfgs);
+extern int lancer_wait_ready(struct be_adapter *adapter);
+extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
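
With the two UDP bits added, a chip that supports them gets the full six hash types; per the comment block above, the hardware then picks the best applicable policy per packet. Composing the request might look like the following sketch, assuming a prepared be_cmd_req_rss_config *req (mirrors the be_cmd_rss_config() hunk):

	u16 rss = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
		  RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

	if (lancer_chip(adapter) || skyhawk_chip(adapter))
		rss |= RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6;
	req->enable_rss = cpu_to_le16(rss);
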
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 63e51d47690..e34be1c7ae8 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -648,7 +648,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ecmd->autoneg != 0)
+ if (ecmd->autoneg != adapter->phy.fc_autoneg)
return -EINVAL;
adapter->tx_fc = ecmd->tx_pause;
adapter->rx_fc = ecmd->rx_pause;
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index d9fb0c501fa..b755f7061dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -45,20 +45,19 @@
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
#define SLIPORT_CONTROL_OFFSET 0x408
#define SLIPORT_ERROR1_OFFSET 0x40C
#define SLIPORT_ERROR2_OFFSET 0x410
+#define PHYSDEV_CONTROL_OFFSET 0x414
#define SLIPORT_STATUS_ERR_MASK 0x80000000
#define SLIPORT_STATUS_RN_MASK 0x01000000
#define SLIPORT_STATUS_RDY_MASK 0x00800000
-
-
#define SLI_PORT_CONTROL_IP_MASK 0x08000000
-
-#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC
+#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
+#define PHYSDEV_CONTROL_INP_MASK 0x40000000
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 501dfa9c88e..4d967717449 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -155,7 +155,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
@@ -201,7 +201,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -220,7 +220,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -558,6 +558,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_pa_hi = upper_32_bits(addr);
wrb->frag_pa_lo = addr & 0xFFFFFFFF;
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+ wrb->rsvd0 = 0;
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
@@ -576,6 +577,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
+static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
@@ -703,33 +709,56 @@ dma_err:
return 0;
}
+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
struct be_queue_info *txq = &txo->q;
+ struct iphdr *ip = NULL;
u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
+ u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
- /* For vlan tagged pkts, BE
- * 1) calculates checksum even when CSO is not requested
- * 2) calculates checksum wrongly for padded pkt less than
- * 60 bytes long.
- * As a workaround disable TX vlan offloading in such cases.
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+
+ /* HW has a bug which considers padding bytes as legal
+ * and modifies the IPv4 hdr's 'tot_len' field
*/
- if (unlikely(vlan_tx_tag_present(skb) &&
- (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto tx_drop;
+ if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
+ is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ }
- skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+	/* HW has a bug wherein it computes the checksum for VLAN
+	 * pkts even when checksum offload is not requested.
+	 * As a workaround, insert the VLAN tag into the pkt manually.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ be_vlan_tag_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb);
if (unlikely(!skb))
goto tx_drop;
-
- skb->vlan_tci = 0;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
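
The pskb_trim() workaround is easiest to see with numbers: a TCP SYN carries 20 B of IP plus 20 B of TCP, so tot_len is 40; with the 14 B Ethernet header the frame is 54 B and gets padded to the 60 B minimum. Trimming to eth_hdr_len + ntohs(tot_len) = 54 strips the 6 pad bytes the hardware would otherwise fold into tot_len. A reduced sketch, untagged case only:

	if (skb->len <= 60 && is_ipv4_pkt(skb)) {
		struct iphdr *ip = ip_hdr(skb);

		/* drop Ethernet padding beyond what the IP header claims */
		pskb_trim(skb, ETH_HLEN + ntohs(ip->tot_len));
	}
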
@@ -786,19 +815,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
* If the user configures more, place BE in vlan promiscuous mode.
*/
-static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
+static int be_vid_config(struct be_adapter *adapter)
{
- struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
- u16 vtag[BE_NUM_VLANS_SUPPORTED];
- u16 ntags = 0, i;
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ u16 num = 0, i;
int status = 0;
- if (vf) {
- vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
- status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
- 1, 1, 0);
- }
-
/* No need to further configure vids if in promiscuous mode */
if (adapter->promiscuous)
return 0;
@@ -809,10 +831,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_N_VID; i++)
if (adapter->vlan_tag[i])
- vtag[ntags++] = cpu_to_le16(i);
+ vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vtag, ntags, 1, 0);
+ vids, num, 1, 0);
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
@@ -841,7 +863,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added++;
@@ -863,7 +885,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
@@ -890,7 +912,7 @@ static void be_set_rx_mode(struct net_device *netdev)
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
if (adapter->vlans_added)
- be_vid_config(adapter, false, 0);
+ be_vid_config(adapter);
}
/* Enable multicast promisc if num configured exceeds what we support */
@@ -1057,13 +1079,16 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
u16 offset, stride;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
while (dev) {
vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
- if (dev->is_virtfn && dev->devfn == vf_fn) {
+ if (dev->is_virtfn && dev->devfn == vf_fn &&
+ dev->bus->number == pdev->bus->number) {
vfs++;
if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
assigned_vfs++;
@@ -1203,16 +1228,16 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Copy data in the first descriptor of this completion */
curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
- /* Copy the header portion into skb_data */
- hdr_len = min(BE_HDR_LEN, curr_frag_len);
- memcpy(skb->data, start, hdr_len);
skb->len = curr_frag_len;
if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
+ memcpy(skb->data, start, curr_frag_len);
/* Complete packet has now been moved to data */
put_page(page_info->page);
skb->data_len = 0;
skb->tail += curr_frag_len;
} else {
+ hdr_len = ETH_HLEN;
+ memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
@@ -1709,9 +1734,10 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_clean(eqo);
- if (eqo->q.created)
+ if (eqo->q.created) {
+ be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ }
be_queue_free(adapter, &eqo->q);
}
}
@@ -1898,6 +1924,12 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
*/
adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
num_irqs(adapter) + 1 : 1;
+ if (adapter->num_rx_qs != MAX_RX_QS) {
+ rtnl_lock();
+ netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_qs);
+ rtnl_unlock();
+ }
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2067,13 +2099,13 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
-void be_detect_dump_ue(struct be_adapter *adapter)
+void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (adapter->eeh_err || adapter->ue_detected)
+ if (be_crit_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2094,16 +2126,24 @@ void be_detect_dump_ue(struct be_adapter *adapter)
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
- ue_lo = (ue_lo & (~ue_lo_mask));
- ue_hi = (ue_hi & (~ue_hi_mask));
+ ue_lo = (ue_lo & ~ue_lo_mask);
+ ue_hi = (ue_hi & ~ue_hi_mask);
}
if (ue_lo || ue_hi ||
sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->ue_detected = true;
- adapter->eeh_err = true;
+ adapter->hw_error = true;
+ dev_err(&adapter->pdev->dev,
+ "Error detected in the card\n");
+ }
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport status 0x%x\n", sliport_status);
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport error1 0x%x\n", sliport_err1);
dev_err(&adapter->pdev->dev,
- "Unrecoverable error in the card\n");
+ "ERR: sliport error2 0x%x\n", sliport_err2);
}
if (ue_lo) {
@@ -2113,6 +2153,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
"UE: %s bit set\n", ue_status_low_desc[i]);
}
}
+
if (ue_hi) {
for (i = 0; ue_hi; ue_hi >>= 1, i++) {
if (ue_hi & 1)
@@ -2121,14 +2162,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "sliport error2 0x%x\n", sliport_err2);
- }
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2141,12 +2174,14 @@ static void be_msix_disable(struct be_adapter *adapter)
static uint be_num_rss_want(struct be_adapter *adapter)
{
+ u32 num = 0;
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_want(adapter) && be_physfn(adapter) &&
- !be_is_mc(adapter))
- return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- else
- return 0;
+ !be_is_mc(adapter)) {
+ num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
+ }
+ return num;
}
static void be_msix_enable(struct be_adapter *adapter)
@@ -2540,11 +2575,7 @@ static int be_clear(struct be_adapter *adapter)
be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
-
be_msix_disable(adapter);
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
return 0;
}
@@ -2602,8 +2633,8 @@ static int be_vf_setup(struct be_adapter *adapter)
cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &vf_cfg->if_handle, NULL, vf + 1);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -2643,29 +2674,43 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->phy.forced_port_speed = -1;
}
-static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
+static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
+ bool *active_mac, u32 *pmac_id)
{
- u32 pmac_id;
- int status;
- bool pmac_id_active;
+ int status = 0;
- status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
- &pmac_id, mac);
- if (status != 0)
- goto do_none;
+ if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ if (!lancer_chip(adapter) && !be_physfn(adapter))
+ *active_mac = true;
+ else
+ *active_mac = false;
- if (pmac_id_active) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, adapter->if_handle, pmac_id);
+ return status;
+ }
- if (!status)
- adapter->pmac_id[0] = pmac_id;
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_mac_from_list(adapter, mac,
+ active_mac, pmac_id, 0);
+ if (*active_mac) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, if_handle,
+ *pmac_id);
+ }
+ } else if (be_physfn(adapter)) {
+		/* For a BE3 PF, get the permanent MAC */
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, true,
+ 0, 0);
+ *active_mac = false;
} else {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+		/* For a BE3 VF, get the soft MAC assigned by the PF */
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ if_handle, 0);
+ *active_mac = true;
}
-do_none:
return status;
}
@@ -2686,12 +2731,12 @@ static int be_get_config(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
int status;
u8 mac[ETH_ALEN];
+ bool active_mac;
be_setup_init(adapter);
@@ -2717,14 +2762,6 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0, 0);
- if (status)
- return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2734,27 +2771,36 @@ static int be_setup(struct be_adapter *adapter)
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
}
+
+ if (lancer_chip(adapter) && !be_physfn(adapter)) {
+ en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ cap_flags = en_flags;
+ }
+
status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, &adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ &adapter->if_handle, 0);
if (status != 0)
goto err;
- /* The VF's permanent mac queried from card is incorrect.
- * For BEx: Query the mac configued by the PF using if_handle
- * For Lancer: Get and use mac_list to obtain mac address.
- */
- if (!be_physfn(adapter)) {
- if (lancer_chip(adapter))
- status = be_add_mac_from_list(adapter, mac);
- else
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false,
- adapter->if_handle, 0);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ memset(mac, 0, ETH_ALEN);
+ active_mac = false;
+ status = be_get_mac_addr(adapter, mac, adapter->if_handle,
+ &active_mac, &adapter->pmac_id[0]);
+ if (status != 0)
+ goto err;
+
+ if (!active_mac) {
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status != 0)
+ goto err;
+ }
+
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
}
status = be_tx_qs_create(adapter);
@@ -2763,7 +2809,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- be_vid_config(adapter, false, 0);
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
@@ -2773,8 +2820,6 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- pcie_set_readrq(adapter->pdev, 4096);
-
if (be_physfn(adapter) && num_vfs) {
if (adapter->dev_num_vfs)
be_vf_setup(adapter);
@@ -2788,8 +2833,6 @@ static int be_setup(struct be_adapter *adapter)
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
-
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
return 0;
err:
be_clear(adapter);
@@ -3033,6 +3076,40 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
return 0;
}
+static int lancer_wait_idle(struct be_adapter *adapter)
+{
+#define SLIPORT_IDLE_TIMEOUT 30
+ u32 reg_val;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
+ reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
+ if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
+ break;
+
+ ssleep(1);
+ }
+
+ if (i == SLIPORT_IDLE_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+static int lancer_fw_reset(struct be_adapter *adapter)
+{
+ int status = 0;
+
+ status = lancer_wait_idle(adapter);
+ if (status)
+ return status;
+
+ iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
+ PHYSDEV_CONTROL_OFFSET);
+
+ return status;
+}
+
static int lancer_fw_download(struct be_adapter *adapter,
const struct firmware *fw)
{
@@ -3047,6 +3124,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
u32 offset = 0;
int status = 0;
u8 add_status = 0;
+ u8 change_status;
if (!IS_ALIGNED(fw->size, sizeof(u32))) {
dev_err(&adapter->pdev->dev,
@@ -3079,9 +3157,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
memcpy(dest_image_ptr, data_ptr, chunk_size);
status = lancer_cmd_write_object(adapter, &flash_cmd,
- chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
-
+ chunk_size, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
if (status)
break;
@@ -3093,8 +3172,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
if (!status) {
/* Commit the FW written */
status = lancer_cmd_write_object(adapter, &flash_cmd,
- 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
+ 0, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
@@ -3107,6 +3188,20 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
+ if (change_status == LANCER_FW_RESET_NEEDED) {
+ status = lancer_fw_reset(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter busy for FW reset.\n"
+ "New FW will not be active.\n");
+ goto lancer_fw_exit;
+ }
+ } else if (change_status != LANCER_NO_RESET_NEEDED) {
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW"
+ " to be active\n");
+ }
+
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
return status;
@@ -3435,10 +3530,15 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_roce_dev_remove(adapter);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -3530,6 +3630,9 @@ static int be_get_initial_config(struct be_adapter *adapter)
if (be_is_wol_supported(adapter))
adapter->wol = true;
+ /* Must be a power of 2 or else MODULO will BUG_ON */
+ adapter->be_get_temp_freq = 64;
+
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
@@ -3585,101 +3688,68 @@ static int be_dev_type_check(struct be_adapter *adapter)
return 0;
}
-static int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_recover_func(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 30
- u32 sliport_status;
- int status = 0, i;
+ int status;
- for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- if (sliport_status & SLIPORT_STATUS_RDY_MASK)
- break;
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
- msleep(1000);
- }
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
- if (i == SLIPORT_READY_TIMEOUT)
- status = -1;
+ be_clear(adapter);
- return status;
-}
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
-static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
- int status;
- u32 sliport_status, err, reset_needed;
- status = lancer_wait_ready(adapter);
- if (!status) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- err = sliport_status & SLIPORT_STATUS_ERR_MASK;
- reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
- if (err && reset_needed) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
-
- /* check adapter has corrected the error */
- status = lancer_wait_ready(adapter);
- sliport_status = ioread32(adapter->db +
- SLIPORT_STATUS_OFFSET);
- sliport_status &= (SLIPORT_STATUS_ERR_MASK |
- SLIPORT_STATUS_RN_MASK);
- if (status || sliport_status)
- status = -1;
- } else if (err || reset_needed) {
- status = -1;
- }
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
}
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery succeeded\n");
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery failed\n");
+
return status;
}
-static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+static void be_func_recovery_task(struct work_struct *work)
{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, func_recovery_work.work);
int status;
- u32 sliport_status;
-
- if (adapter->eeh_err || adapter->ue_detected)
- return;
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ be_detect_error(adapter);
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "Adapter in error state."
- "Trying to recover.\n");
+ if (adapter->hw_error && lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
- if (status)
- goto err;
+ if (adapter->eeh_error)
+ goto out;
+ rtnl_lock();
netif_device_detach(adapter->netdev);
+ rtnl_unlock();
- if (netif_running(adapter->netdev))
- be_close(adapter->netdev);
-
- be_clear(adapter);
-
- adapter->fw_timeout = false;
-
- status = be_setup(adapter);
- if (status)
- goto err;
-
- if (netif_running(adapter->netdev)) {
- status = be_open(adapter->netdev);
- if (status)
- goto err;
- }
-
- netif_device_attach(adapter->netdev);
+ status = lancer_recover_func(adapter);
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery succeeded\n");
+ if (!status)
+ netif_device_attach(adapter->netdev);
}
- return;
-err:
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery failed\n");
+
+out:
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
}
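
The recovery logic leaves be_worker() and becomes its own self-rescheduling delayed work. Its lifecycle, condensed from the hunks in this patch (probe/resume arm it; suspend, shutdown, remove and EEH detection cancel it):

	/* Sketch: lifecycle of the new func_recovery_work. */
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);

	/* armed at the end of probe / resume / eeh_resume */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* cancelled in suspend / shutdown / remove and on EEH error */
	cancel_delayed_work_sync(&adapter->func_recovery_work);
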
static void be_worker(struct work_struct *work)
@@ -3690,11 +3760,6 @@ static void be_worker(struct work_struct *work)
struct be_eq_obj *eqo;
int i;
- if (lancer_chip(adapter))
- lancer_test_and_recover_fn_err(adapter);
-
- be_detect_dump_ue(adapter);
-
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
@@ -3710,6 +3775,9 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
+ if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
for_all_rx_queues(adapter, rxo, i) {
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
@@ -3727,10 +3795,7 @@ reschedule:
static bool be_reset_required(struct be_adapter *adapter)
{
- u32 reg;
-
- pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
- return reg;
+ return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}
static int __devinit be_probe(struct pci_dev *pdev,
@@ -3739,6 +3804,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
int status = 0;
struct be_adapter *adapter;
struct net_device *netdev;
+ char port_name;
status = pci_enable_device(pdev);
if (status)
@@ -3749,7 +3815,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_dev;
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
+ netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
if (netdev == NULL) {
status = -ENOMEM;
goto rel_reg;
@@ -3780,22 +3846,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto free_netdev;
- if (lancer_chip(adapter)) {
- status = lancer_wait_ready(adapter);
- if (!status) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
- status = lancer_test_and_set_rdy_state(adapter);
- }
- if (status) {
- dev_err(&pdev->dev, "Adapter in non recoverable error\n");
- goto ctrl_clean;
- }
- }
-
/* sync up with fw's ready state */
if (be_physfn(adapter)) {
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
goto ctrl_clean;
}
@@ -3826,6 +3879,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto stats_clean;
INIT_DELAYED_WORK(&adapter->work, be_worker);
+ INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
adapter->rx_fc = adapter->tx_fc = true;
status = be_setup(adapter);
@@ -3839,8 +3893,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_roce_dev_add(adapter);
- dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
- adapter->port_num);
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
+
+ be_cmd_query_port_name(adapter, &port_name);
+
+ dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
+ port_name);
return 0;
@@ -3872,6 +3931,8 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (adapter->wol)
be_setup_wol(adapter, true);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
netif_device_detach(netdev);
if (netif_running(netdev)) {
rtnl_lock();
@@ -3912,6 +3973,9 @@ static int be_resume(struct pci_dev *pdev)
be_open(netdev);
rtnl_unlock();
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
if (adapter->wol)
@@ -3931,6 +3995,7 @@ static void be_shutdown(struct pci_dev *pdev)
return;
cancel_delayed_work_sync(&adapter->work);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
netif_device_detach(adapter->netdev);
@@ -3950,9 +4015,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- adapter->eeh_err = true;
+ adapter->eeh_error = true;
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
+ rtnl_lock();
netif_device_detach(netdev);
+ rtnl_unlock();
if (netif_running(netdev)) {
rtnl_lock();
@@ -3980,9 +4049,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
int status;
dev_info(&adapter->pdev->dev, "EEH reset\n");
- adapter->eeh_err = false;
- adapter->ue_detected = false;
- adapter->fw_timeout = false;
+ be_clear_all_error(adapter);
status = pci_enable_device(pdev);
if (status)
@@ -3993,7 +4060,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4015,6 +4082,10 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto err;
+
status = be_setup(adapter);
if (status)
goto err;
@@ -4024,6 +4095,9 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
return;
err:
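
For context, be_eeh_err_detected(), be_eeh_reset() and be_eeh_resume() are invoked by the PCI core through the driver's error-handler table; a condensed sketch of the usual wiring (the struct instance name is a plausible rendering, not quoted from this patch):

	static struct pci_error_handlers be_eeh_handlers = {
		.error_detected	= be_eeh_err_detected,
		.slot_reset	= be_eeh_reset,
		.resume		= be_eeh_resume,
	};
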
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a3816781054..94b7bfcdb24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
};
/**
- * ethoc_probe() - initialize OpenCores ethernet MAC
+ * ethoc_probe - initialize OpenCores ethernet MAC
* pdev: platform device
*/
static int __devinit ethoc_probe(struct platform_device *pdev)
@@ -1057,7 +1057,7 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* Check the MAC again for validity, if it still isn't choose and
* program a random one. */
if (!is_valid_ether_addr(netdev->dev_addr)) {
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
random_mac = true;
}
@@ -1140,7 +1140,7 @@ out:
}
/**
- * ethoc_remove() - shutdown OpenCores ethernet MAC
+ * ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
static int __devexit ethoc_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 16b07048274..74d749e29aa 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -479,9 +479,14 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
rxdes = ftgmac100_current_rxdes(priv);
} while (!done);
- if (skb->len <= 64)
+ /* Small frames are copied into linear part of skb to free one page */
+ if (skb->len <= 128) {
skb->truesize -= PAGE_SIZE;
- __pskb_pull_tail(skb, min(skb->len, 64U));
+ __pskb_pull_tail(skb, skb->len);
+ } else {
+ /* We pull the minimum amount into linear part */
+ __pskb_pull_tail(skb, ETH_HLEN);
+ }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 829b1092fd7..b901a01e3fa 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -441,11 +441,14 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
skb->len += length;
skb->data_len += length;
- /* page might be freed in __pskb_pull_tail() */
- if (length > 64)
+ if (length > 128) {
skb->truesize += PAGE_SIZE;
- __pskb_pull_tail(skb, min(length, 64));
-
+ /* We pull the minimum amount into linear part */
+ __pskb_pull_tail(skb, ETH_HLEN);
+ } else {
+ /* Small frames are copied into linear part to free one page */
+ __pskb_pull_tail(skb, length);
+ }
ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
ftmac100_rx_pointer_advance(priv);
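
Both Faraday drivers now use 128 bytes as the copy-break point: frames at or below it are pulled entirely into the linear area so the fragment page can be recycled, while larger frames pull only the Ethernet header for the stack's parsing. The shared idiom, sketched:

	#define COPY_BREAK	128	/* threshold chosen by this patch */

	if (skb->len <= COPY_BREAK)
		__pskb_pull_tail(skb, skb->len);	/* linearize; page freed */
	else
		__pskb_pull_tail(skb, ETH_HLEN);	/* pull header only */
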
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ff7f4c5115a..fffd20528b5 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -49,6 +49,7 @@
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
#include <asm/cacheflush.h>
@@ -1388,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * fec_poll_controller: FEC Poll controller function
+/**
+ * fec_poll_controller - FEC Poll controller function
* @dev: The FEC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1506,18 +1507,25 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
static void __devinit fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
+ int msec = 1;
struct device_node *np = pdev->dev.of_node;
if (!np)
return;
+ of_property_read_u32(np, "phy-reset-duration", &msec);
+ /* A sane reset duration should not be longer than 1s */
+ if (msec > 1000)
+ msec = 1;
+
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
return;
}
- msleep(1);
+ msleep(msec);
gpio_set_value(phy_reset, 1);
}
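
of_property_read_u32() leaves its output untouched on error, so the preset 1 ms survives when the optional property is absent; the subsequent clamp guards against absurd DT values. The idiom in isolation (np assumed valid):

	u32 msec = 1;	/* default reset pulse: 1 ms */

	of_property_read_u32(np, "phy-reset-duration", &msec);
	if (msec > 1000)	/* a sane reset should not exceed 1 s */
		msec = 1;
	msleep(msec);
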
#else /* CONFIG_OF */
@@ -1546,6 +1554,7 @@ fec_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
static int dev_id;
struct pinctrl *pinctrl;
+ struct regulator *reg_phy;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1593,8 +1602,6 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = ret;
}
- fec_reset_phy(pdev);
-
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
@@ -1634,6 +1641,18 @@ fec_probe(struct platform_device *pdev)
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
+ reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(reg_phy)) {
+ ret = regulator_enable(reg_phy);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable phy regulator: %d\n", ret);
+ goto failed_regulator;
+ }
+ }
+
+ fec_reset_phy(pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1655,6 +1674,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
+failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
failed_pin:
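The probe path treats the "phy" supply as optional: a failed devm_regulator_get() is ignored and only a failed regulator_enable() aborts probing, with the managed (devm_) lifetime covering the release on every error label. A condensed sketch of that pattern (enable_optional_supply() is illustrative, not part of the driver):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* The supply is optional, so a failed lookup is not an error; note
 * that, like the patch, this also swallows -EPROBE_DEFER.
 */
static int enable_optional_supply(struct device *dev)
{
	struct regulator *reg = devm_regulator_get(dev, "phy");

	if (IS_ERR(reg))
		return 0;		/* no supply described: carry on */

	return regulator_enable(reg);	/* managed get, explicit enable */
}

A stricter variant would propagate the -EPROBE_DEFER case so the probe can be retried once the regulator driver appears.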
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f7f0bf5d037..9527b28d70d 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -47,6 +47,9 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+/* Number of microseconds to wait for an MII register to respond */
+#define MII_TIMEOUT 1000
+
struct fsl_pq_mdio_priv {
void __iomem *map;
struct fsl_pq_mdio __iomem *regs;
@@ -64,6 +67,8 @@ struct fsl_pq_mdio_priv {
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
int regnum, u16 value)
{
+ u32 status;
+
/* Set the PHY address and the register address we want to write */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -71,10 +76,10 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
out_be32(&regs->miimcon, value);
/* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & MIIMIND_BUSY)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
- return 0;
+ return status ? 0 : -ETIMEDOUT;
}
/*
@@ -91,6 +96,7 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
int mii_id, int regnum)
{
u16 value;
+ u32 status;
/* Set the PHY address and the register address we want to read */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -99,9 +105,12 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
out_be32(&regs->miimcom, 0);
out_be32(&regs->miimcom, MII_READ_COMMAND);
- /* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
- cpu_relax();
+ /* Wait for the transaction to finish, normally less than 100us */
+ status = spin_event_timeout(!(in_be32(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)),
+ MII_TIMEOUT, 0);
+ if (!status)
+ return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
value = in_be32(&regs->miimstat);
@@ -144,7 +153,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
- int timeout = PHY_INIT_TIMEOUT;
+ u32 status;
mutex_lock(&bus->mdio_lock);
@@ -155,12 +164,12 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
/* Wait until the bus is free */
- while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
mutex_unlock(&bus->mdio_lock);
- if (timeout < 0) {
+ if (!status) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
bus->name);
return -EBUSY;
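All three MDIO wait loops move from unbounded cpu_relax() spinning to spin_event_timeout(), a powerpc helper that polls a condition for at most the given number of microseconds and returns zero on timeout; the callers map that to -ETIMEDOUT (transfers) or -EBUSY (bus reset). A portable sketch of the same bounded-poll shape (poll_until() and done_after_three() are hypothetical; the real macro also takes a per-iteration delay):

#include <stdbool.h>
#include <stdio.h>

static bool done_after_three(void *arg)
{
	return --*(int *)arg <= 0;
}

/* Returns true if cond(arg) became true within 'spins' polls. */
static bool poll_until(bool (*cond)(void *), void *arg, unsigned long spins)
{
	while (spins--) {
		if (cond(arg))
			return true;
		/* kernel code would cpu_relax()/udelay() here */
	}
	return false;
}

int main(void)
{
	int countdown = 3;

	printf("%s\n", poll_until(done_after_three, &countdown, 1000)
	       ? "condition met" : "-ETIMEDOUT");
	return 0;
}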
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ab1d80ff079..4605f724668 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
-/*
- * drivers/net/ethernet/freescale/gianfar.c
+/* drivers/net/ethernet/freescale/gianfar.c
*
* Gianfar Ethernet Driver
* This driver is designed for the non-CPM ethernet controllers
@@ -114,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -266,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
- addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
- vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
}
/* Start the rx descriptor ring where the tx ring leaves off */
@@ -276,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->dev = ndev;
- addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
- vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size, GFP_KERNEL);
+ tx_queue->tx_ring_size,
+ GFP_KERNEL);
if (!tx_queue->tx_skbuff) {
netif_err(priv, ifup, ndev,
"Could not allocate tx_skbuff\n");
@@ -298,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size, GFP_KERNEL);
+ rx_queue->rx_ring_size,
+ GFP_KERNEL);
if (!rx_queue->rx_skbuff) {
netif_err(priv, ifup, ndev,
@@ -327,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
int i;
baddr = &regs->tbase0;
- for(i = 0; i < priv->num_tx_queues; i++) {
+ for (i = 0; i < priv->num_tx_queues; i++) {
gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
baddr = &regs->rbase0;
- for(i = 0; i < priv->num_rx_queues; i++) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
}
@@ -405,7 +406,8 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->attreli, attrs);
/* Start with defaults, and add stashing or locking
- * depending on the approprate variables */
+ * depending on the appropriate variables
+ */
attrs = ATTR_INIT_SETTINGS;
if (priv->bd_stash_en)
@@ -426,16 +428,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_packets += priv->rx_queue[i]->stats.rx_packets;
- rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
}
dev->stats.rx_packets = rx_packets;
- dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_bytes = rx_bytes;
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
return &dev->stats;
@@ -468,7 +470,7 @@ static const struct net_device_ops gfar_netdev_ops = {
void lock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_lock(&priv->rx_queue[i]->rxlock);
@@ -476,7 +478,7 @@ void lock_rx_qs(struct gfar_private *priv)
void lock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_lock(&priv->tx_queue[i]->txlock);
@@ -484,7 +486,7 @@ void lock_tx_qs(struct gfar_private *priv)
void unlock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_unlock(&priv->rx_queue[i]->rxlock);
@@ -492,7 +494,7 @@ void unlock_rx_qs(struct gfar_private *priv)
void unlock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_unlock(&priv->tx_queue[i]->txlock);
@@ -508,13 +510,13 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+ (priv->ndev->features & NETIF_F_RXCSUM) ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
kfree(priv->tx_queue[i]);
@@ -522,7 +524,7 @@ static void free_tx_pointers(struct gfar_private *priv)
static void free_rx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
kfree(priv->rx_queue[i]);
@@ -530,7 +532,7 @@ static void free_rx_pointers(struct gfar_private *priv)
static void unmap_group_regs(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < MAXGROUPS; i++)
if (priv->gfargrp[i].regs)
@@ -539,7 +541,7 @@ static void unmap_group_regs(struct gfar_private *priv)
static void disable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_disable(&priv->gfargrp[i].napi);
@@ -547,14 +549,14 @@ static void disable_napi(struct gfar_private *priv)
static void enable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
- struct gfar_private *priv, const char *model)
+ struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
priv->gfargrp[priv->num_grps].priv = priv;
spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
- if(priv->mode == MQ_MG_MODE) {
- queue_mask = (u32 *)of_get_property(np,
- "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map =
- queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
- queue_mask = (u32 *)of_get_property(np,
- "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map =
- queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ if (priv->mode == MQ_MG_MODE) {
+ queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_rx_queues = num_rx_qs;
priv->num_grps = 0x0;
- /* Init Rx queue filer rule set linked list*/
+ /* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
priv->rx_list.count = 0;
mutex_init(&priv->rx_queue_access);
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
} else {
priv->mode = SQ_SG_MODE;
err = gfar_parse_group(np, priv, model);
- if(err)
+ if (err)
goto err_grp_init;
}
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
mac_addr = of_get_mac_address(np);
+
if (mac_addr)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
- FSL_GIANFAR_DEV_HAS_CSUM |
- FSL_GIANFAR_DEV_HAS_VLAN |
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
- FSL_GIANFAR_DEV_HAS_TIMER;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -781,7 +781,7 @@ err_grp_init:
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ struct ifreq *ifr, int cmd)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
unsigned int new_bit_map = 0x0;
int mask = 0x1 << (max_qs - 1), i;
+
for (i = 0; i < max_qs; i++) {
if (bit_map & mask)
new_bit_map = new_bit_map + (1 << i);
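reverse_bitmap() exists because the hardware puts queue 0 in the most-significant bit of the map while for_each_set_bit() scans from bit 0; mirroring the map lets the two agree. The same logic as a small stand-alone program:

#include <stdio.h>

/* Bit (max_qs - 1 - i) of the input becomes bit i of the output. */
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_map = 0, mask = 1u << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_map |= 1u << i;
		mask >>= 1;
	}
	return new_map;
}

int main(void)
{
	/* queues 0 and 2 set in an 8-queue map: 0xa0 -> 0x05 */
	printf("0x%02x -> 0x%02x\n", 0xa0, reverse_bitmap(0xa0, 8));
	return 0;
}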
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* MPC8313 Rev 2.0 and higher; All MPC837x */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_74;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_76;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_A002;
/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
- (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
priv->errata |= GFAR_ERRATA_12;
if (priv->errata)
@@ -960,7 +961,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
}
/* Set up the ethernet device structure, private data,
- * and anything else we need before we start */
+ * and anything else we need before we start
+ */
static int gfar_probe(struct platform_device *ofdev)
{
u32 tempval;
@@ -991,8 +993,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_detect_errata(priv);
- /* Stop the DMA engine now, in case it was running before */
- /* (The firmware could have used it, and left it running). */
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
gfar_halt(dev);
/* Reset MAC layer */
@@ -1026,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+ GFAR_DEV_WEIGHT);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1081,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->padding = 0;
if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1098,28 +1102,32 @@ static int gfar_probe(struct platform_device *ofdev)
/* Need to reverse the bit maps as bit_map's MSB is q0
* but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers */
+ * basically reverses the queue numbers
+ */
for (i = 0; i < priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map = reverse_bitmap(
- priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map = reverse_bitmap(
- priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ priv->gfargrp[i].tx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+ priv->gfargrp[i].rx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
}
/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups */
+ * also assign queues to groups
+ */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
+ priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
+ priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1149,7 +1157,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* always enable rx filer*/
+ /* always enable rx filer */
priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
@@ -1165,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
}
device_init_wakeup(&dev->dev,
- priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1189,13 +1198,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
- /* Even more device info helps when determining which kernel */
- /* provided which set of benchmarks. */
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks.
+ */
netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
i, priv->rx_queue[i]->rx_ring_size);
- for(i = 0; i < priv->num_tx_queues; i++)
+ for (i = 0; i < priv->num_tx_queues; i++)
netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
i, priv->tx_queue[i]->tx_ring_size);
@@ -1242,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
netif_device_detach(ndev);
@@ -1294,7 +1305,8 @@ static int gfar_resume(struct device *dev)
unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
if (!netif_running(ndev)) {
netif_device_attach(ndev);
@@ -1393,13 +1405,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
}
if (ecntrl & ECNTRL_REDUCED_MODE) {
- if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
return PHY_INTERFACE_MODE_RMII;
+ }
else {
phy_interface_t interface = priv->interface;
- /*
- * This isn't autodetected right now, so it must
+ /* This isn't autodetected right now, so it must
* be set by the device tree or platform code.
*/
if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1465,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-/*
- * Initialize TBI PHY interface for communicating with the
+/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1490,7 @@ static void gfar_configure_serdes(struct net_device *dev)
return;
}
- /*
- * If the link is already up, we must already be ok, and don't need to
+ /* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
@@ -1492,18 +1502,19 @@ static void gfar_configure_serdes(struct net_device *dev)
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, MII_ADVERTISE,
- ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM);
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
- phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
- BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(tbiphy, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1554,15 +1565,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
- /*
- * Normaly TSEC should not hang on GRS commands, so we should
+ /* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
return 0;
- /*
- * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
* the same as bits 23-30, the eTSEC Rx is assumed to be idle
* and the Rx can be safely reset.
*/
@@ -1580,7 +1589,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
u32 tempval;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
- if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
- != (DMACTRL_GRS | DMACTRL_GTS)) {
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+ (DMACTRL_GRS | DMACTRL_GTS)) {
int ret;
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
} else {
for (i = 0; i < priv->num_grps; i++)
free_irq(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
}
free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
continue;
dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
- j++) {
+ j++) {
txbdp++;
dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
- DMA_FROM_DEVICE);
+ rxbdp->bufPtr, priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
}
@@ -1718,7 +1727,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
}
/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
+ * Then free tx_skbuff and rx_skbuff
+ */
static void free_skb_resources(struct gfar_private *priv)
{
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1728,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
struct netdev_queue *txq;
+
tx_queue = priv->tx_queue[i];
txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
- if(tx_queue->tx_skbuff)
+ if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
netdev_tx_reset_queue(txq);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if(rx_queue->rx_skbuff)
+ if (rx_queue->rx_skbuff)
free_skb_rx_queue(rx_queue);
}
dma_free_coherent(&priv->ofdev->dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- priv->tx_queue[0]->tx_bd_base,
- priv->tx_queue[0]->tx_bd_dma_base);
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
skb_queue_purge(&priv->rx_recycle);
}
@@ -1784,7 +1795,7 @@ void gfar_start(struct net_device *dev)
}
void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
+ unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 __iomem *baddr;
@@ -1794,11 +1805,11 @@ void gfar_configure_coalescing(struct gfar_private *priv,
* multiple queues, there's only single reg to program
*/
gfar_write(&regs->txic, 0);
- if(likely(priv->tx_queue[0]->txcoalescing))
+ if (likely(priv->tx_queue[0]->txcoalescing))
gfar_write(&regs->txic, priv->tx_queue[0]->txic);
gfar_write(&regs->rxic, 0);
- if(unlikely(priv->rx_queue[0]->rxcoalescing))
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
if (priv->mode == MQ_MG_MODE) {
@@ -1825,12 +1836,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
int err;
/* If the device has multiple interrupts, register for
- * them. Otherwise, only register for the one */
+ * them. Otherwise, only register for the one
+ */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
/* Install our interrupt handlers for Error,
- * Transmit, and Receive */
- if ((err = request_irq(grp->interruptError, gfar_error, 0,
- grp->int_name_er,grp)) < 0) {
+ * Transmit, and Receive
+ */
+ if ((err = request_irq(grp->interruptError, gfar_error,
+ 0, grp->int_name_er, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptError);
@@ -1838,21 +1851,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto tx_irq_fail;
}
- if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
- grp->int_name_rx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptReceive, gfar_receive,
+ 0, grp->int_name_rx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptReceive);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
- grp->int_name_tx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto err_irq_fail;
@@ -1912,8 +1925,9 @@ irq_fail:
return err;
}
-/* Called when something needs to use the ethernet device */
-/* Returns 0 for success. */
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
static int gfar_enet_open(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1958,18 +1972,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
- int fcb_length)
+ int fcb_length)
{
- u8 flags = 0;
-
/* If we're here, it's an IP packet with a TCP or UDP
* payload. We set it to checksum, using a pseudo-header
* we provide
*/
- flags = TXFCB_DEFAULT;
+ u8 flags = TXFCB_DEFAULT;
- /* Tell the controller what the protocol is */
- /* And provide the already calculated phcs */
+ /* Tell the controller what the protocol is
+ * And provide the already calculated phcs
+ */
if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
flags |= TXFCB_UDP;
fcb->phcs = udp_hdr(skb)->check;
@@ -1979,7 +1992,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
/* l3os is the distance between the start of the
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
- * l3 hdr and the l4 hdr */
+ * l3 hdr and the l4 hdr
+ */
fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
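For reference, the FCB offsets reduce to simple header arithmetic. A worked example for an untagged IPv4 frame without IP options (the 8-byte FCB length matches GMAC_FCB_LEN; the other values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen = 14, ip_hlen = 20;
	unsigned int fcb_len  = 8;	/* GMAC_FCB_LEN in the driver */
	/* skb->data starts at the FCB, so the network header sits
	 * fcb_len + eth_hlen bytes from skb->data:
	 */
	unsigned int network_offset = fcb_len + eth_hlen;

	printf("l3os = %u\n", network_offset - fcb_len);	/* 14: MAC header */
	printf("l4os = %u\n", ip_hlen);				/* 20: IP header  */
	return 0;
}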
@@ -1993,7 +2007,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
- struct txbd8 *base, int ring_size)
+ struct txbd8 *base, int ring_size)
{
struct txbd8 *new_bd = bdp + stride;
@@ -2001,13 +2015,14 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
}
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
- int ring_size)
+ int ring_size)
{
return skip_txbd(bdp, 1, base, ring_size);
}
-/* This is called by the kernel when a frame is ready for transmission. */
-/* It is pointed to by the dev->hard_start_xmit function pointer */
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2022,13 +2037,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
- /*
- * TOE=1 frames larger than 2500 bytes may see excess delays
+ /* TOE=1 frames larger than 2500 bytes may see excess delays
* before start of transmission.
*/
if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->len > 2500)) {
int ret;
ret = skb_checksum_help(skb);
@@ -2044,16 +2058,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* check if time stamp should be generated */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
+ priv->hwts_tx_en)) {
do_tstamp = 1;
fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
}
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ vlan_tx_tag_present(skb) ||
+ unlikely(do_tstamp)) &&
+ (skb_headroom(skb) < fcb_length)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2096,12 +2110,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Time stamp insertion requires one additional TxBD */
if (unlikely(do_tstamp))
txbdp_tstamp = txbdp = next_txbd(txbdp, base,
- tx_queue->tx_ring_size);
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
if (unlikely(do_tstamp))
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
- TXBD_INTERRUPT);
+ TXBD_INTERRUPT);
else
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
@@ -2113,7 +2127,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
length = skb_shinfo(skb)->frags[i].size;
lstatus = txbdp->lstatus | length |
- BD_LFLAG(TXBD_READY);
+ BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
if (i == nr_frags - 1)
@@ -2143,8 +2157,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
/* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
- && ((unsigned long)fcb % 0x20) > 0x18)) {
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ ((unsigned long)fcb % 0x20) > 0x18)) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
} else {
@@ -2172,10 +2186,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ skb_headlen(skb), DMA_TO_DEVICE);
- /*
- * If time stamping is requested one additional TxBD must be set up. The
+ /* If time stamping is requested, one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
* GMAC_FCB_LEN. The second TxBD points to the actual frame data with
* the full frame length.
@@ -2183,7 +2196,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(do_tstamp)) {
txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_length);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2191,8 +2204,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, skb->len);
- /*
- * We can work in parallel with gfar_clean_tx_ring(), except
+ /* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
* when we were reading the num_txbdfree and checking for available
* space, that's because outside of this function it can only grow,
@@ -2205,8 +2217,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock_irqsave(&tx_queue->txlock, flags);
- /*
- * The powerpc-specific eieio() is used, as wmb() has too strong
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
* don't need), thus requiring a more expensive sync instruction. At
@@ -2222,9 +2233,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
/* Update the current skb pointer to the next entry we will use
- * (wrapping if necessary) */
+ * (wrapping if necessary)
+ */
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
- TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
@@ -2232,7 +2244,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
- are full. We need to tell the kernel to stop sending us stuff. */
+ * are full. We need to tell the kernel to stop sending us stuff.
+ */
if (!tx_queue->num_txbdfree) {
netif_tx_stop_queue(txq);
@@ -2357,12 +2370,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
frame_size += priv->padding;
- tempsize =
- (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
+ tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
/* Only stop and start the controller if it isn't already
- * stopped, and we changed something */
+ * stopped, and we changed something
+ */
if ((oldsize != tempsize) && (dev->flags & IFF_UP))
stop_gfar(dev);
@@ -2375,11 +2388,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
/* If the mtu is larger than the max size for standard
* ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length */
+ * to allow huge frames, and to check the length
+ */
tempval = gfar_read(&regs->maccfg2);
if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
else
tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2400,7 +2414,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
- reset_task);
+ reset_task);
struct net_device *dev = priv->ndev;
if (dev->flags & IFF_UP) {
@@ -2427,7 +2441,7 @@ static void gfar_align_skb(struct sk_buff *skb)
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
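gfar_align_skb() reserves just enough headroom to land skb->data on the next alignment boundary (a full stride when the pointer is already aligned). A stand-alone illustration of the arithmetic, assuming the driver's 64-byte RXBUF_ALIGNMENT:

#include <stdio.h>

#define RXBUF_ALIGNMENT 64	/* assumed value of the driver constant */

static unsigned long align_reserve(unsigned long data)
{
	return RXBUF_ALIGNMENT - (data & (RXBUF_ALIGNMENT - 1));
}

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x1001, 0x103f };

	for (int i = 0; i < 3; i++)
		printf("%#lx -> reserve %lu -> %#lx\n", addrs[i],
		       align_reserve(addrs[i]),
		       addrs[i] + align_reserve(addrs[i]));
	return 0;
}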
/* Interrupt Handler for Transmit complete */
@@ -2461,8 +2475,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
frags = skb_shinfo(skb)->nr_frags;
- /*
- * When time stamping, one additional TxBD must be freed.
+ /* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2476,7 +2489,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
/* Only clean completed frames */
if ((lstatus & BD_LFLAG(TXBD_READY)) &&
- (lstatus & BD_LENGTH_MASK))
+ (lstatus & BD_LENGTH_MASK))
break;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2486,11 +2499,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
buflen = bdp->length;
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- buflen, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2503,23 +2517,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
}
bytes_sent += skb->len;
- /*
- * If there's room in the queue (limit it to rx_buffer_size)
+ /* If there's room in the queue (limit it to rx_buffer_size)
* we add this skb back into the pool, if it's the right size
*/
if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
+ skb_recycle_check(skb, priv->rx_buffer_size +
+ RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
skb_queue_head(&priv->rx_recycle, skb);
} else
@@ -2528,7 +2539,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
skb_dirtytx = (skb_dirtytx + 1) &
- TX_RING_MOD_MASK(tx_ring_size);
+ TX_RING_MOD_MASK(tx_ring_size);
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2558,8 +2569,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
__napi_schedule(&gfargrp->napi);
} else {
- /*
- * Clear IEVENT, so interrupts aren't called again
+ /* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived.
*/
gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2576,7 +2586,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2587,7 +2597,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
gfar_init_rxbdp(rx_queue, bdp, buf);
}
-static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2601,7 +2611,7 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff * gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2619,8 +2629,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
- /* If the packet was truncated, none of the other errors
- * matter */
+ /* If the packet was truncated, none of the other errors matter */
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
@@ -2661,7 +2670,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
* were verified, then we tell the kernel that no
- * checksumming is necessary. Otherwise, it is */
+ * checksumming is necessary. Otherwise, it is.
+ */
if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -2669,8 +2679,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
-/* gfar_process_frame() -- handle one incoming packet if skb
- * isn't NULL. */
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi)
{
@@ -2682,8 +2691,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
- /* Remove the FCB from the skb */
- /* Remove the padded bytes, if there are any */
+ /* Remove the FCB from the skb
+ * Remove the padded bytes, if there are any
+ */
if (amount_pull) {
skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
@@ -2693,6 +2703,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
u64 *ns = (u64 *) skb->data;
+
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(*ns);
}
@@ -2706,8 +2717,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
- /*
- * There's need to check for NETIF_F_HW_VLAN_RX here.
+ /* There's a need to check for NETIF_F_HW_VLAN_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
@@ -2725,8 +2735,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
- * until the budget/quota has been reached. Returns the number
- * of frames handled
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
@@ -2746,6 +2756,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
+
rmb();
/* Add another skb for the future */
@@ -2754,15 +2765,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
- bdp->length > priv->rx_buffer_size))
+ bdp->length > priv->rx_buffer_size))
bdp->status = RXBD_LARGE;
/* We drop the frame if we failed to allocate a new buffer */
if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
- bdp->status & RXBD_ERR)) {
+ bdp->status & RXBD_ERR)) {
count_errors(bdp->status, dev);
if (unlikely(!newskb))
@@ -2781,7 +2792,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi);
+ &rx_queue->grp->napi);
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2800,9 +2811,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
/* update to point at the next skb */
- rx_queue->skb_currx =
- (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
}
/* Update the current rxbd pointer to be the next one */
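The index updates rely on RX_RING_MOD_MASK()/TX_RING_MOD_MASK() expanding to (size - 1), i.e. the ring sizes must be powers of two for the AND to behave like a modulo:

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 256, idx = 255;

	idx = (idx + 1) & (ring_size - 1);	/* wraps 255 -> 0 */
	printf("next index: %u\n", idx);
	return 0;
}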
@@ -2813,8 +2823,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
static int gfar_poll(struct napi_struct *napi, int budget)
{
- struct gfar_priv_grp *gfargrp = container_of(napi,
- struct gfar_priv_grp, napi);
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi);
struct gfar_private *priv = gfargrp->priv;
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2828,11 +2838,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
budget_per_queue = budget/num_queues;
/* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived */
+ * because of the packets that have already arrived
+ */
gfar_write(&regs->ievent, IEVENT_RTX_MASK);
while (num_queues && left_over_budget) {
-
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
@@ -2843,12 +2853,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
tx_queue = priv->tx_queue[rx_queue->qindex];
tx_cleaned += gfar_clean_tx_ring(tx_queue);
- rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
- budget_per_queue);
+ rx_cleaned_per_queue =
+ gfar_clean_rx_ring(rx_queue, budget_per_queue);
rx_cleaned += rx_cleaned_per_queue;
- if(rx_cleaned_per_queue < budget_per_queue) {
+ if (rx_cleaned_per_queue < budget_per_queue) {
left_over_budget = left_over_budget +
- (budget_per_queue - rx_cleaned_per_queue);
+ (budget_per_queue -
+ rx_cleaned_per_queue);
set_bit(i, &serviced_queues);
num_queues--;
}
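gfar_poll() first gives every queue an equal slice of the NAPI budget, then hands whatever lightly loaded queues left unused to the queues that exhausted their slice. A toy model of that redistribution (the per-queue backlogs are made up):

#include <stdio.h>

int main(void)
{
	int work[4] = { 2, 50, 3, 50 };	/* frames waiting per queue */
	int pending = 4, budget = 40, served[4] = { 0 };

	while (pending && budget) {
		int share = budget / pending, leftover = 0, busy = 0;

		for (int i = 0; i < 4; i++) {
			if (served[i])
				continue;
			int done = work[i] < share ? work[i] : share;

			work[i] -= done;
			if (done < share) {	/* queue drained early */
				leftover += share - done;
				served[i] = 1;
				pending--;
			} else {
				busy++;
			}
		}
		budget = busy ? leftover : 0;	/* re-split the surplus */
	}
	for (int i = 0; i < 4; i++)
		printf("queue %d: %d frames left\n", i, work[i]);
	return 0;
}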
@@ -2866,25 +2877,25 @@ static int gfar_poll(struct napi_struct *napi, int budget)
gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer */
- /* Otherwise, clear it */
- gfar_configure_coalescing(priv,
- gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return rx_cleaned;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void gfar_netpoll(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i;
/* If the device has multiple interrupts, run tx/rx */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2893,7 +2904,7 @@ static void gfar_netpoll(struct net_device *dev)
disable_irq(priv->gfargrp[i].interruptReceive);
disable_irq(priv->gfargrp[i].interruptError);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptError);
enable_irq(priv->gfargrp[i].interruptReceive);
enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2902,7 +2913,7 @@ static void gfar_netpoll(struct net_device *dev)
for (i = 0; i < priv->num_grps; i++) {
disable_irq(priv->gfargrp[i].interruptTransmit);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptTransmit);
}
}
@@ -2954,7 +2965,8 @@ static void adjust_link(struct net_device *dev)
u32 ecntrl = gfar_read(&regs->ecntrl);
/* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
+ * If not, we operate in half-duplex mode.
+ */
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
@@ -2980,7 +2992,8 @@ static void adjust_link(struct net_device *dev)
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
/* Reduced mode distinguishes
- * between 10 and 100 */
+ * between 10 and 100
+ */
if (phydev->speed == SPEED_100)
ecntrl |= ECNTRL_R100;
else
@@ -3019,7 +3032,8 @@ static void adjust_link(struct net_device *dev)
/* Update the hash table based on the current list of multicast
* addresses we subscribe to. Also, change the promiscuity of
* the device based on the flags (this function is called
- * whenever dev->flags is changed */
+ * whenever dev->flags is changed
+ */
static void gfar_set_multi(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -3081,7 +3095,8 @@ static void gfar_set_multi(struct net_device *dev)
/* If we have extended hash tables, we need to
* clear the exact match registers to prepare for
- * setting them */
+ * setting them
+ */
if (priv->extended_hash) {
em_num = GFAR_EM_NUM + 1;
gfar_clear_exact_match(dev);
@@ -3107,13 +3122,14 @@ static void gfar_set_multi(struct net_device *dev)
/* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception */
+ * don't interfere with normal reception
+ */
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
}
@@ -3129,7 +3145,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
* hash index which gaddr register to use, and the 5 other bits
* indicate which bit (assuming an IBM numbering scheme, which
* for PowerPC (tm) is usually the case) in the register holds
- * the entry. */
+ * the entry.
+ */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
@@ -3161,8 +3178,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
macptr += num*2;
- /* Now copy it into the mac registers backwards, cuz */
- /* little endian is silly */
+ /* Now copy it into the mac registers backwards, cuz
+ * little endian is silly
+ */
for (idx = 0; idx < ETH_ALEN; idx++)
tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
@@ -3194,7 +3212,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
- netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ netdev_dbg(dev,
+ "error interrupt (ievent=0x%08x imask=0x%08x)\n",
events, gfar_read(&regs->imask));
/* Update the error counters */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8a025570d97..8971921cc1c 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -46,18 +46,24 @@
#include "gianfar.h"
extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+ int rx_work_limit);
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
- u64 * buf);
+ u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-dropped-by-kernel",
@@ -130,14 +136,15 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
else
memcpy(buf, stat_gstrings,
- GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}
/* Fill in an array of 64-bit statistics from various sources.
* This array will be appended to the end of the ethtool_stats
* structure, and returned to user space
*/
-static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -174,8 +181,8 @@ static int gfar_sset_count(struct net_device *dev, int sset)
}
/* Fills in the drvinfo structure with some basic info */
-static void gfar_gdrvinfo(struct net_device *dev, struct
- ethtool_drvinfo *drvinfo)
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
@@ -226,7 +233,8 @@ static int gfar_reglen(struct net_device *dev)
}
/* Return a dump of the GFAR register space */
-static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -239,7 +247,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
/* Convert microseconds to ethernet clock ticks, which changes
* depending on what speed the controller is running at */
-static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
+ unsigned int usecs)
{
unsigned int count;
@@ -263,7 +272,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
}
/* Convert ethernet clock ticks to microseconds */
-static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
+ unsigned int ticks)
{
unsigned int count;
@@ -288,7 +298,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
/* Get the coalescing parameters, and put them in the cvals
* structure. */
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -353,7 +364,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
* Both cvals->*_usecs and cvals->*_frames have to be > 0
* in order for coalescing to be active
*/
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
int i = 0;
@@ -364,7 +376,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Set up rx coalescing */
/* As of now, we will enable/disable coalescing for all
* queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface */
+ * along with the ethtool interface
+ */
if ((cvals->rx_coalesce_usecs == 0) ||
(cvals->rx_max_coalesced_frames == 0)) {
for (i = 0; i < priv->num_rx_queues; i++)
@@ -433,7 +446,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Fills in rvals with the current ring parameters. Currently,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -459,8 +473,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
/* Change the current ring parameters, stopping the controller if
* necessary so that we don't mess things up while we're in
* motion. We wait for the ring to be clean before reallocating
- * the rings. */
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+ * the rings.
+ */
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i = 0;
@@ -486,7 +502,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
unsigned long flags;
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -499,7 +516,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -509,7 +526,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+ priv->tx_queue[i]->num_txbdfree =
+ priv->tx_queue[i]->tx_ring_size;
}
/* Rebuild the rings with the new size */
@@ -535,7 +553,8 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
if (dev->flags & IFF_UP) {
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -548,7 +567,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -564,12 +583,14 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
struct gfar_private *priv = netdev_priv(dev);
+
priv->msg_enable = data;
}
@@ -614,14 +635,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
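Every RXH_* field enabled above follows the same pattern: append one (control, property) pair that feeds the parsed field into hash table 0, then step cur_filer_idx down. A toy sketch of that append-and-walk-down bookkeeping, with made-up names and values:

#include <stdio.h>

struct entry { unsigned int fcr, fpr; };

/* Entries are consumed from the top of the table downwards. */
static void add_rule(struct entry *tab, unsigned int *idx,
                     unsigned int fcr, unsigned int fpr)
{
        tab[*idx].fcr = fcr;
        tab[*idx].fpr = fpr;
        (*idx)--;
}

int main(void)
{
        struct entry tab[8] = { { 0, 0 } };
        unsigned int idx = 7;

        add_rule(tab, &idx, 0x1, 0x100);      /* e.g. a VLAN-id rule */
        add_rule(tab, &idx, 0x2, 0x200);      /* e.g. an IP-src rule */
        printf("next free slot: %u\n", idx);  /* 5 */
        return 0;
}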
@@ -630,7 +651,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_VLAN) {
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -639,7 +660,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -648,7 +669,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -657,7 +678,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -666,7 +687,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -675,7 +696,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -683,7 +704,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
}
}
-static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
+ u64 class)
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
@@ -694,9 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
int ret = 1;
local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
pr_err("Out of memory\n");
ret = 0;
@@ -726,9 +748,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
local_rqfpr[j] = priv->ftp_rqfpr[i];
local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
- if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
- RQFCR_CLE |RQFCR_AND)) &&
- (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ if ((priv->ftp_rqfcr[i] ==
+ (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@@ -743,12 +765,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
- !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
- RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
priv->ftp_rqfpr[l] = FPR_FILER_MASK;
gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
- priv->ftp_rqfpr[l]);
+ priv->ftp_rqfpr[l]);
break;
}
@@ -773,7 +795,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
- local_rqfcr[k], local_rqfpr[k]);
+ local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)
break;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
@@ -785,7 +807,8 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+static int gfar_set_hash_opts(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd)
{
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
@@ -810,10 +833,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
@@ -823,16 +846,17 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK;
if (i == RCTRL_PRSDEP_MASK) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
/* Sets the properties for arbitrary filer rule
- * to the first 4 Layer 4 Bytes */
+ * to the first 4 Layer 4 Bytes
+ */
regs->rbifx = 0xC0C1C2C3;
return 0;
}
@@ -870,14 +894,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab)
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
gfar_set_mask(mask, tab);
- tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
- | RQFCR_AND;
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
+ RQFCR_AND;
tab->fe[tab->index].prop = value;
tab->index++;
}
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
gfar_set_mask(mask, tab);
tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
@@ -885,8 +909,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
tab->index++;
}
-/*
- * For setting a tuple of value and mask of type flag
+/* For setting a tuple of value and mask of type flag
* Example:
* IP-Src = 10.0.0.0/255.0.0.0
* value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
@@ -901,7 +924,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
 * Further, all the masks are one-padded for better hardware efficiency.
*/
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
switch (flag) {
/* 3bit */
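A standalone sketch of the one-padding described above, assuming a 3-bit field such as the VLAN priority: every bit outside the field's width is forced to 1 in the mask, so the comparator never has to mask them off.

#include <stdio.h>
#include <stdint.h>

static uint32_t pad_mask(uint32_t mask, unsigned int field_bits)
{
        return mask | ~((1u << field_bits) - 1);
}

int main(void)
{
        printf("0x%08X\n", pad_mask(0x7, 3));  /* -> 0xFFFFFFFF */
        printf("0x%08X\n", pad_mask(0x6, 3));  /* -> 0xFFFFFFFE */
        return 0;
}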
@@ -959,7 +982,8 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
- struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+ struct ethtool_tcpip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
@@ -970,97 +994,92 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
- struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+ struct ethtool_usrip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ tab);
}
/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 upper_temp_mask = 0;
u32 lower_temp_mask = 0;
+
/* Source address */
if (!is_broadcast_ether_addr(mask->h_source)) {
-
if (is_zero_ether_addr(mask->h_source)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_source[0] << 16
- | mask->h_source[1] << 8
- | mask->h_source[2];
- lower_temp_mask = mask->h_source[3] << 16
- | mask->h_source[4] << 8
- | mask->h_source[5];
+ upper_temp_mask = mask->h_source[0] << 16 |
+ mask->h_source[1] << 8 |
+ mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16 |
+ mask->h_source[4] << 8 |
+ mask->h_source[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_source[0] << 16 | value->h_source[1]
- << 8 | value->h_source[2],
- upper_temp_mask, RQFCR_PID_SAH, tab);
+ gfar_set_attribute(value->h_source[0] << 16 |
+ value->h_source[1] << 8 |
+ value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_source[3] << 16 | value->h_source[4]
- << 8 | value->h_source[5],
- lower_temp_mask, RQFCR_PID_SAL, tab);
+ gfar_set_attribute(value->h_source[3] << 16 |
+ value->h_source[4] << 8 |
+ value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
}
/* Destination address */
if (!is_broadcast_ether_addr(mask->h_dest)) {
-
/* Special for destination is limited broadcast */
- if ((is_broadcast_ether_addr(value->h_dest)
- && is_zero_ether_addr(mask->h_dest))) {
+ if ((is_broadcast_ether_addr(value->h_dest) &&
+ is_zero_ether_addr(mask->h_dest))) {
gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
} else {
-
if (is_zero_ether_addr(mask->h_dest)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_dest[0] << 16
- | mask->h_dest[1] << 8
- | mask->h_dest[2];
- lower_temp_mask = mask->h_dest[3] << 16
- | mask->h_dest[4] << 8
- | mask->h_dest[5];
+ upper_temp_mask = mask->h_dest[0] << 16 |
+ mask->h_dest[1] << 8 |
+ mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16 |
+ mask->h_dest[4] << 8 |
+ mask->h_dest[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_dest[0] << 16
- | value->h_dest[1] << 8
- | value->h_dest[2],
- upper_temp_mask, RQFCR_PID_DAH, tab);
+ gfar_set_attribute(value->h_dest[0] << 16 |
+ value->h_dest[1] << 8 |
+ value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_dest[3] << 16
- | value->h_dest[4] << 8
- | value->h_dest[5],
- lower_temp_mask, RQFCR_PID_DAL, tab);
+ gfar_set_attribute(value->h_dest[3] << 16 |
+ value->h_dest[4] << 8 |
+ value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
}
}
gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
-
}
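The byte-packing above can be checked in isolation. A minimal sketch splitting a 48-bit MAC into the two 24-bit halves the filer expects (the sample address is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t mac[6] = { 0x00, 0x04, 0x9F, 0x01, 0x02, 0x03 };
        uint32_t upper = mac[0] << 16 | mac[1] << 8 | mac[2];
        uint32_t lower = mac[3] << 16 | mac[4] << 8 | mac[5];

        printf("upper 24 bits: 0x%06X\n", upper);  /* 0x00049F */
        printf("lower 24 bits: 0x%06X\n", lower);  /* 0x010203 */
        return 0;
}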
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 vlan = 0, vlan_mask = 0;
u32 id = 0, id_mask = 0;
u32 cfi = 0, cfi_mask = 0;
u32 prio = 0, prio_mask = 0;
-
u32 old_index = tab->index;
/* Check if vlan is wanted */
@@ -1076,13 +1095,16 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ } else if (cfi != VLAN_TAG_PRESENT &&
+ cfi_mask == VLAN_TAG_PRESENT) {
vlan_mask |= RQFPR_CFI;
}
}
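A standalone sketch of the TCI decomposition performed above, using the standard 802.1Q layout (12-bit ID, 1-bit CFI, 3-bit priority); the mask values mirror VLAN_VID_MASK and friends, the sample TCI is made up:

#include <stdio.h>
#include <stdint.h>

#define VID_MASK   0x0FFF
#define CFI_MASK   0x1000
#define PRIO_MASK  0xE000
#define PRIO_SHIFT 13

int main(void)
{
        uint16_t tci = 0xB064;  /* prio 5, CFI set, VID 0x064 */

        printf("vid  = 0x%03X\n", tci & VID_MASK);
        printf("cfi  = %u\n", !!(tci & CFI_MASK));
        printf("prio = %u\n", (tci & PRIO_MASK) >> PRIO_SHIFT);
        return 0;
}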
@@ -1090,34 +1112,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
- RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
- &rule->m_u.tcp_ip4_spec, tab);
+ &rule->m_u.tcp_ip4_spec, tab);
break;
case UDP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
- RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
- &rule->m_u.udp_ip4_spec, tab);
+ &rule->m_u.udp_ip4_spec, tab);
break;
case SCTP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
- gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
- (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
+ (struct ethtool_tcpip4_spec *)&rule->m_u,
+ tab);
break;
case IP_USER_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
- (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ (struct ethtool_usrip4_spec *) &rule->m_u,
+ tab);
break;
case ETHER_FLOW:
if (vlan)
gfar_set_parse_bits(vlan, vlan_mask, tab);
gfar_set_ether((struct ethhdr *) &rule->h_u,
- (struct ethhdr *) &rule->m_u, tab);
+ (struct ethhdr *) &rule->m_u, tab);
break;
default:
return -1;
@@ -1152,7 +1176,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
}
- /* In rare cases the cache can be full while there is free space in hw */
+ /* In rare cases the cache can be full while there is
+ * free space in hw
+ */
if (tab->index > MAX_FILER_CACHE_IDX - 1)
return -EBUSY;
@@ -1161,7 +1187,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
+ struct gfar_filer_entry src[0], s32 size)
{
while (size > 0) {
size--;
@@ -1171,10 +1197,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
}
/* Delete the contents of the filer-table between start and end
- * and collapse them */
+ * and collapse them
+ */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
int length;
+
if (end > MAX_FILER_CACHE_IDX || end < begin)
return -EINVAL;
@@ -1200,14 +1228,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
+ struct filer_table *tab)
{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
- > MAX_FILER_CACHE_IDX)
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+ begin > MAX_FILER_CACHE_IDX)
return -EINVAL;
gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
+ tab->index - length + 1);
tab->index += length;
return 0;
@@ -1215,9 +1243,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length,
static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_AND | RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_AND | RQFCR_CLE))
return start;
}
return -1;
@@ -1225,16 +1254,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_CLE))
return start;
}
return -1;
}
-/*
- * Uses hardwares clustering option to reduce
+/* Uses the hardware's clustering option to reduce
* the number of filer table entries
*/
static void gfar_cluster_filer(struct filer_table *tab)
@@ -1244,8 +1273,7 @@ static void gfar_cluster_filer(struct filer_table *tab)
while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
j = i;
while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /*
- * The cluster entries self and the previous one
+ /* The cluster entries self and the previous one
* (a mask) must be identical!
*/
if (tab->fe[i].ctrl != tab->fe[j].ctrl)
@@ -1260,21 +1288,21 @@ static void gfar_cluster_filer(struct filer_table *tab)
jend = gfar_get_next_cluster_end(j, tab);
if (jend == -1 || iend == -1)
break;
- /*
- * First we make some free space, where our cluster
+
+ /* First we make some free space, where our cluster
* element should be. Then we copy it there and finally
 * delete it from its old location.
*/
-
- if (gfar_expand_filer_entries(iend, (jend - j), tab)
- == -EINVAL)
+ if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
+ -EINVAL)
break;
gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
+ &(tab->fe[jend + 1]), jend - j);
if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j), tab) == -EINVAL)
+ jend + (jend - j),
+ tab) == -EINVAL)
return;
/* Mask out cluster bit */
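The expand/copy/trim sequence described in the comment can be sketched with plain integers standing in for filer entries; the indices and counts below are made up:

#include <stdio.h>
#include <string.h>

int main(void)
{
        int tab[16] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        int len = 8, src = 5, dst = 2, cnt = 2;

        /* expand: open a cnt-wide gap at dst */
        memmove(&tab[dst + cnt], &tab[dst], (len - dst) * sizeof(int));
        len += cnt;
        /* copy: move the cluster (now shifted up by cnt) into the gap */
        memcpy(&tab[dst], &tab[src + cnt], cnt * sizeof(int));
        /* trim: collapse the stale copy out of the table */
        memmove(&tab[src + cnt], &tab[src + 2 * cnt],
                (len - src - 2 * cnt) * sizeof(int));
        len -= cnt;

        for (int i = 0; i < len; i++)
                printf("%d ", tab[i]);     /* 1 2 6 7 3 4 5 8 */
        printf("\n");
        return 0;
}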
@@ -1285,8 +1313,9 @@ static void gfar_cluster_filer(struct filer_table *tab)
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
+ struct gfar_filer_entry *a2,
+ struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
{
u32 temp[4];
temp[0] = a1->ctrl & mask;
@@ -1305,13 +1334,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1,
b2->ctrl |= temp[2];
}
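A minimal sketch of the masked swap this helper performs, reduced to two words and one mask:

#include <stdio.h>

/* Only the bits selected by mask change owners. */
static void masked_swap(unsigned int *a, unsigned int *b, unsigned int mask)
{
        unsigned int ta = *a & mask;
        unsigned int tb = *b & mask;

        *a = (*a & ~mask) | tb;
        *b = (*b & ~mask) | ta;
}

int main(void)
{
        unsigned int a = 0xAB, b = 0xCD;

        masked_swap(&a, &b, 0xF0);         /* high nibbles trade places */
        printf("0x%02X 0x%02X\n", a, b);   /* 0xCB 0xAD */
        return 0;
}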
-/*
- * Generate a list consisting of masks values with their start and
+/* Generate a list consisting of mask values with their start and
* end of validity and block as indicator for parts belonging
* together (glued by ANDs) in mask_table
*/
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i, and_index = 0, block_index = 1;
@@ -1327,13 +1355,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
and_index++;
}
/* cluster starts and ends will be separated because they should
- * hold their position */
+ * hold their position
+ */
if (tab->fe[i].ctrl & RQFCR_CLE)
block_index++;
/* A not set AND indicates the end of a depended block */
if (!(tab->fe[i].ctrl & RQFCR_AND))
block_index++;
-
}
mask_table[and_index - 1].end = i - 1;
@@ -1341,14 +1369,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
return and_index;
}
-/*
- * Sorts the entries of mask_table by the values of the masks.
+/* Sorts the entries of mask_table by the values of the masks.
* Important: The 0xFF80 flags of the first and last entry of a
* block must hold their position (which queue, CLusterEnable, ReJEct,
* AND)
*/
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
+ struct filer_table *temp_table, u32 and_index)
{
/* Pointer to compare function (_asc or _desc) */
int (*gfar_comp)(const void *, const void *);
@@ -1359,16 +1386,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
gfar_comp = &gfar_comp_desc;
for (i = 0; i < and_index; i++) {
-
if (prev != mask_table[i].block) {
old_first = mask_table[start].start + 1;
old_last = mask_table[i - 1].end;
sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
/* Toggle order for every block. This makes the
- * thing more efficient! */
+ * thing more efficient!
+ */
if (gfar_comp == gfar_comp_desc)
gfar_comp = &gfar_comp_asc;
else
@@ -1378,12 +1405,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
new_last = mask_table[i - 1].end;
gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND
- );
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND);
start = i;
size = 0;
@@ -1391,11 +1417,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
size++;
prev = mask_table[i].block;
}
-
}
-/*
- * Reduces the number of masks needed in the filer table to save entries
+/* Reduces the number of masks needed in the filer table to save entries
* This is done by sorting the masks of a depended block. A depended block is
* identified by gluing ANDs or CLE. The sorting order toggles after every
* block. Of course entries in scope of a mask must change their location with
@@ -1410,13 +1434,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
s32 ret = 0;
/* We need a copy of the filer table because
- * we want to change its order */
+ * we want to change its order
+ */
temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
if (mask_table == NULL) {
ret = -ENOMEM;
@@ -1428,7 +1453,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
gfar_sort_mask_table(mask_table, temp_table, and_index);
/* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says */
+ * the real one in the order the mask table says
+ */
for (i = 0; i < and_index; i++) {
size = mask_table[i].end - mask_table[i].start + 1;
gfar_copy_filer_entries(&(tab->fe[j]),
@@ -1437,7 +1463,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
}
/* And finally we just have to check for duplicated masks and drop the
- * second ones */
+ * second ones
+ */
for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
if (tab->fe[i].ctrl == 0x80) {
previous_mask = i++;
@@ -1448,7 +1475,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
if (tab->fe[i].ctrl == 0x80) {
if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
/* Two identical ones found!
- * So drop the second one! */
+ * So drop the second one!
+ */
gfar_trim_filer_entries(i, i, tab);
} else
/* Not identical! */
@@ -1463,7 +1491,7 @@ end: kfree(temp_table);
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i = 0;
if (tab->index > MAX_FILER_IDX - 1)
@@ -1473,13 +1501,15 @@ static int gfar_write_filer_table(struct gfar_private *priv,
lock_rx_qs(priv);
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++)
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
for (; i < MAX_FILER_IDX - 1; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
- * because that's what people expect */
+ * because that's what people expect
+ */
gfar_write_filer(priv, i, 0x20, 0x0);
unlock_rx_qs(priv);
@@ -1488,21 +1518,21 @@ static int gfar_write_filer_table(struct gfar_private *priv,
}
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
- struct gfar_private *priv)
+ struct gfar_private *priv)
{
if (flow->flow_type & FLOW_EXT) {
if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
netdev_warn(priv->ndev,
- "User-specific data not supported!\n");
+ "User-specific data not supported!\n");
if (~flow->m_ext.vlan_etype)
netdev_warn(priv->ndev,
- "VLAN-etype not supported!\n");
+ "VLAN-etype not supported!\n");
}
if (flow->flow_type == IP_USER_FLOW)
if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
netdev_warn(priv->ndev,
- "IP-Version differing from IPv4 not supported!\n");
+ "IP-Version differing from IPv4 not supported!\n");
return 0;
}
@@ -1520,15 +1550,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
return -ENOMEM;
/* Now convert the existing filer data from flow_spec into
- * filer tables binary format */
+ * filer tables binary format
+ */
list_for_each_entry(j, &priv->rx_list.list, list) {
ret = gfar_convert_to_filer(&j->fs, tab);
if (ret == -EBUSY) {
- netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: No free space!\n");
goto end;
}
if (ret == -1) {
- netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: Unsupported Flow-type!\n");
goto end;
}
}
@@ -1540,9 +1573,9 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
gfar_optimize_filer_masks(tab);
pr_debug("\n\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
@@ -1551,7 +1584,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
goto end;
}
-end: kfree(tab);
+end:
+ kfree(tab);
return ret;
}
@@ -1569,7 +1603,7 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
}
static int gfar_add_cls(struct gfar_private *priv,
- struct ethtool_rx_flow_spec *flow)
+ struct ethtool_rx_flow_spec *flow)
{
struct ethtool_flow_spec_container *temp, *comp;
int ret = 0;
@@ -1591,7 +1625,6 @@ static int gfar_add_cls(struct gfar_private *priv,
list_add(&temp->list, &priv->rx_list.list);
goto process;
} else {
-
list_for_each_entry(comp, &priv->rx_list.list, list) {
if (comp->fs.location > flow->location) {
list_add_tail(&temp->list, &comp->list);
@@ -1599,8 +1632,8 @@ static int gfar_add_cls(struct gfar_private *priv,
}
if (comp->fs.location == flow->location) {
netdev_err(priv->ndev,
- "Rule not added: ID %d not free!\n",
- flow->location);
+ "Rule not added: ID %d not free!\n",
+ flow->location);
ret = -EBUSY;
goto clean_mem;
}
@@ -1642,7 +1675,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc)
}
return ret;
-
}
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1663,7 +1695,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
}
static int gfar_get_cls_all(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct ethtool_flow_spec_container *comp;
u32 i = 0;
@@ -1714,7 +1746,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
@@ -1748,23 +1780,19 @@ static int gfar_get_ts_info(struct net_device *dev,
struct gfar_private *priv = netdev_priv(dev);
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping =
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
return 0;
}
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = gfar_phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9ac14f80485..21c6574c5f1 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -185,7 +185,7 @@ static void mem_disp(u8 *addr, int size)
for (; (u32) i < (u32) addr + size4Aling; i += 4)
printk("%08x ", *((u32 *) (i)));
for (; (u32) i < (u32) addr + size; i++)
- printk("%02x", *((u8 *) (i)));
+ printk("%02x", *((i)));
if (notAlign == 1)
printk("\r\n");
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index d496673f090..3f4391bede8 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1217,7 +1217,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
ringptr->pdl = pdlptr + 1;
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
/*
* Write address and length of first PDL Fragment (which is used for
@@ -1243,7 +1243,7 @@ static int hp100_init_txpdl(struct net_device *dev,
ringptr->pdl = pdlptr; /* +1; */
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
return roundup(MAX_TX_FRAG * 2 + 2, 4);
}
@@ -1628,7 +1628,7 @@ static void hp100_clean_txring(struct net_device *dev)
/* Conversion to new PCI API : NOP */
pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->txrhead->skb);
- lp->txrhead->skb = (void *) NULL;
+ lp->txrhead->skb = NULL;
lp->txrhead = lp->txrhead->next;
lp->txrcommit--;
}
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 6c2952c8ea1..3735bfa5360 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -629,10 +629,10 @@ init_i596(struct net_device *dev) {
memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+ i596_add_cmd(dev, &lp->set_add);
lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+ i596_add_cmd(dev, &lp->tdr);
if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
return 1;
@@ -737,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
lp = netdev_priv(dev);
while (lp->cmd_head) {
- cmd = (struct i596_cmd *)lp->cmd_head;
+ cmd = lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
@@ -1281,7 +1281,7 @@ static void set_multicast_list(struct net_device *dev) {
lp->i596_config[8] |= 0x01;
}
- i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ i596_add_cmd(dev, &lp->set_conf);
}
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index cae17f4bc93..353f57f675d 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -571,7 +571,7 @@ static int init586(struct net_device *dev)
}
#endif
- ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+ ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
/*
* alloc xmit-buffs / init xmit_cmds
@@ -584,7 +584,7 @@ static int init586(struct net_device *dev)
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
- if((void *)ptr > (void *)dev->mem_end)
+ if(ptr > (void *)dev->mem_end)
{
printk("%s: not enough shared-mem for your configuration!\n",dev->name);
return 1;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 4fb47f14dbf..cb66f574dc9 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
return 0;
}
-/**
- * allocates memory for a queue and registers pages in phyp
- */
+/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada720b42ff..535f94fac4a 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1249,20 +1249,35 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
const struct firmware *fw = nic->fw;
u8 timer, bundle, min_size;
int err = 0;
+ bool required = false;
/* do not load u-code for ICH devices */
if (nic->flags & ich)
return NULL;
- /* Search for ucode match against h/w revision */
- if (nic->mac == mac_82559_D101M)
+ /* Search for ucode match against h/w revision
+ *
+ * Based on comments in the source code for the FreeBSD fxp
+ * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
+ *
+ * "fixes for bugs in the B-step hardware (specifically, bugs
+ * with Inline Receive)."
+ *
+ * So we must fail if it cannot be loaded.
+ *
+ * The other microcode files are only required for the optional
+ * CPUSaver feature. Nice to have, but no reason to fail.
+ */
+ if (nic->mac == mac_82559_D101M) {
fw_name = FIRMWARE_D101M;
- else if (nic->mac == mac_82559_D101S)
+ } else if (nic->mac == mac_82559_D101S) {
fw_name = FIRMWARE_D101S;
- else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
+ } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
fw_name = FIRMWARE_D102E;
- else /* No ucode on other devices */
+ required = true;
+ } else { /* No ucode on other devices */
return NULL;
+ }
/* If the firmware has not previously been loaded, request a pointer
* to it. If it was previously loaded, we are reinitializing the
@@ -1273,10 +1288,17 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- netif_err(nic, probe, nic->netdev,
- "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
- return ERR_PTR(err);
+ if (required) {
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
+ return ERR_PTR(err);
+ } else {
+ netif_info(nic, probe, nic->netdev,
+ "CPUSaver disabled. Needs \"%s\": %d\n",
+ fw_name, err);
+ return NULL;
+ }
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
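The policy this hunk introduces, reduced to a standalone sketch: a load failure is fatal only when the blob fixes hardware bugs, otherwise the feature is quietly disabled. The firmware names are the driver's; the stub loader and its error code are made up:

#include <stdio.h>

static int load_fw(const char *name)
{
        (void)name;
        return -2;      /* pretend the blob is absent */
}

static int init_ucode(const char *name, int required)
{
        int err = load_fw(name);

        if (!err)
                return 0;
        if (required) {
                fprintf(stderr, "fatal: %s failed (%d)\n", name, err);
                return err;
        }
        fprintf(stderr, "CPUSaver disabled, %s missing (%d)\n", name, err);
        return 0;       /* degrade gracefully */
}

int main(void)
{
        init_ucode("d101m_ucode.bin", 0);        /* optional: keep going */
        return init_ucode("d102e_ucode.bin", 1); /* required: fail hard  */
}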
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3103f0b6bf5..736a7d987db 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1851,6 +1851,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c526279e492..3d683952876 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
}
/**
- * e1000_reset_hw: reset the hardware completely
+ * e1000_reset_hw - reset the hardware completely
* @hw: Struct containing variables accessed by shared code
*
* Reset the transmit and receive units; mask and clear all interrupts.
@@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/**
- * e1000_init_hw: Performs basic configuration of the adapter.
+ * e1000_init_hw - Performs basic configuration of the adapter.
* @hw: Struct containing variables accessed by shared code
*
* Assumes that the controller has previously been reset and is in a
@@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* @hw: Struct containing variables accessed by shared code
* @speed: Speed of the connection
* @duplex: Duplex setting of the connection
-
+ *
* Detects the current speed and duplex settings of the hardware.
*/
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
@@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
* @data: data to write to the PHY
-
+ *
* Writes a value to a PHY register
*/
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7483ca0a628..3bfbb8df898 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter)
e1000_release_manageability(adapter);
}
-/**
- * Dump the eeprom for users having checksum issues
- **/
+/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1078,18 +1076,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->features |= netdev->hw_features;
- netdev->hw_features |= NETIF_F_RXCSUM;
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->hw_features |= (NETIF_F_RXCSUM |
+ NETIF_F_RXALL |
+ NETIF_F_RXFCS);
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= (NETIF_F_TSO |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SG);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
mmiowb();
}
-/**
- * 82547 workaround to avoid controller hang in half-duplex environment.
+/* 82547 workaround to avoid controller hang in half-duplex environment.
* The workaround is to avoid queuing a large packet that would span
* the internal Tx FIFO ring boundary by notifying the stack to resend
* the packet at a later time. This gives the Tx FIFO an opportunity to
* flush all packets. When that occurs, we reset the Tx FIFO pointers
* to the beginning of the Tx FIFO.
- **/
+ */
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 1f063dcd8f8..0b3bade957f 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1680,16 +1680,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
- * Check several times, if Sync and Config
- * both are consistently 1 then simply ignore
- * the Invalid bit and restart Autoneg
+ * Check several times; if the SYNCH bit and the CONFIG
+ * bit are both consistently 1, simply ignore
+ * the IV bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
udelay(10);
rxcw = er32(RXCW);
- if ((rxcw & E1000_RXCW_IV) &&
- !((rxcw & E1000_RXCW_SYNCH) &&
- (rxcw & E1000_RXCW_C))) {
+ if ((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))
+ continue;
+
+ if (rxcw & E1000_RXCW_IV) {
mac->serdes_has_link = false;
mac->serdes_link_state =
e1000_serdes_link_down;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6e6fffb3458..cd153326c3c 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 905e2147d91..0349e2478df 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1897,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 4) &&
@@ -1916,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev,
}
if (adapter->itr_setting != 0)
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
return 0;
}
@@ -2062,6 +2061,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_rxnfc = e1000_get_rxnfc,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 623e30b9964..95b245310f1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2159,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
}
/**
- * @e1000_alloc_ring - allocate memory for a ring structure
+ * e1000_alloc_ring_dma - allocate memory for a ring structure
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -2474,6 +2474,30 @@ set_itr_now:
}
/**
+ * e1000e_write_itr - write the ITR value to the appropriate registers
+ * @adapter: address of board private structure
+ * @itr: new ITR value to program
+ *
+ * e1000e_write_itr determines if the adapter is in MSI-X mode
+ * and, if so, writes the EITR registers with the ITR value.
+ * Otherwise, it writes the ITR value into the ITR register.
+ **/
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
+
+ if (adapter->msix_entries) {
+ int vector;
+
+ for (vector = 0; vector < adapter->num_vectors; vector++)
+ writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
+ } else {
+ ew32(ITR, new_itr);
+ }
+}
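The conversion above can be sanity-checked in isolation: the register takes the minimum inter-interrupt gap in 256 ns units, so a rate of 20000 interrupts/s becomes 1000000000 / (20000 * 256) = 195. A minimal sketch:

#include <stdio.h>

static unsigned int itr_to_reg(unsigned int ints_per_sec)
{
        return ints_per_sec ? 1000000000u / (ints_per_sec * 256u) : 0;
}

int main(void)
{
        printf("%u\n", itr_to_reg(20000));  /* dynamic-mode default -> 195 */
        printf("%u\n", itr_to_reg(0));      /* throttling off -> 0 */
        return 0;
}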
+
+/**
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
@@ -3059,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay);
if ((adapter->itr_setting != 0) && (adapter->itr != 0))
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
ctrl_ext = er32(CTRL_EXT);
/* Auto-Mask interrupts upon ICR access */
@@ -3486,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
}
}
@@ -4576,7 +4600,7 @@ link_up:
adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- ew32(ITR, 1000000000 / (itr * 256));
+ e1000e_write_itr(adapter, itr);
}
/* Cause software interrupt to ensure Rx ring is cleaned */
@@ -6191,7 +6215,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
- e_info("PHY reset is blocked due to SOL/IDER session.\n");
+ dev_info(&pdev->dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
netdev->features = (NETIF_F_SG |
@@ -6241,7 +6266,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
break;
if (i == 2) {
- e_err("The NVM Checksum Is Not Valid\n");
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -6251,13 +6276,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
- e_err("NVM Read Error while reading MAC address\n");
+ dev_err(&pdev->dev,
+ "NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->perm_addr);
err = -EIO;
goto err_eeprom;
}
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 55cc1565bc2..dfbfa7fd98c 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- e_info("%s Enabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Enabled\n",
+ opt->name);
return 0;
case OPTION_DISABLED:
- e_info("%s Disabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Disabled\n",
+ opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- e_info("%s set to %i\n", opt->name, *value);
+ dev_info(&adapter->pdev->dev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- e_info("%s\n", ent->str);
+ dev_info(&adapter->pdev->dev, "%s\n",
+ ent->str);
return 0;
}
}
@@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
- opt->err);
+ dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- e_notice("Warning: no configuration for board #%i\n", bd);
- e_notice("Using defaults for all values\n");
+ dev_notice(&adapter->pdev->dev,
+ "Warning: no configuration for board #%i\n", bd);
+ dev_notice(&adapter->pdev->dev,
+ "Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
@@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
* default values
*/
if (adapter->itr > 4)
- e_info("%s set to default %d\n", opt.name,
- adapter->itr);
+ dev_info(&adapter->pdev->dev,
+ "%s set to default %d\n", opt.name,
+ adapter->itr);
}
adapter->itr_setting = adapter->itr;
switch (adapter->itr) {
case 0:
- e_info("%s turned off\n", opt.name);
+ dev_info(&adapter->pdev->dev, "%s turned off\n",
+ opt.name);
break;
case 1:
- e_info("%s set to dynamic mode\n", opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
case 3:
- e_info("%s set to dynamic conservative mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
adapter->itr = 20000;
break;
case 4:
- e_info("%s set to simplified (2000-8000 ints) mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
break;
default:
/*
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 35d1e4f2c92..10efcd88dca 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -117,6 +117,7 @@
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
/* Split and Replication RX Control - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ae6d3f393a5..9e572dd29ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -65,19 +65,30 @@ struct igb_adapter;
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \
- (hw->mac.type > e1000_82575 ? 8 : 4)))
-#define IGB_MAX_RX_QUEUES_I210 4
+#define IGB_MAX_RX_QUEUES 8
+#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211 2
-#define IGB_MAX_TX_QUEUES 16
-#define IGB_MAX_TX_QUEUES_I210 4
-#define IGB_MAX_TX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 8
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520
+/* NVM version defines */
+#define IGB_MAJOR_MASK 0xF000
+#define IGB_MINOR_MASK 0x0FF0
+#define IGB_BUILD_MASK 0x000F
+#define IGB_COMB_VER_MASK 0x00FF
+#define IGB_MAJOR_SHIFT 12
+#define IGB_MINOR_SHIFT 4
+#define IGB_COMB_VER_SHFT 8
+#define IGB_NVM_VER_INVALID 0xFFFF
+#define IGB_ETRACK_SHIFT 16
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -371,6 +382,7 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ char fw_version[32];
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -420,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
+extern void igb_set_fw_version(struct igb_adapter *);
#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_remove(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 812d4f963bd..a19c84cad0e 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev,
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
hw->nvm.ops.update(hw);
+ igb_set_fw_version(adapter);
kfree(eeprom_buff);
return ret_val;
}
@@ -718,20 +719,16 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- u16 eeprom_data;
strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (eeprom_data & 0xF000) >> 12,
- (eeprom_data & 0x0FF0) >> 4,
- eeprom_data & 0x000F);
-
+ /*
+ * EEPROM image version # is reported as firmware version # for
+ * 82575 controllers
+ */
+ strlcpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
@@ -2271,6 +2268,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+#ifdef CONFIG_IGB_PTP
+static int igb_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL) |
+ (1 << HWTSTAMP_FILTER_SOME) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+#endif
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2299,6 +2328,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_coalesce = igb_set_coalesce,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+#ifdef CONFIG_IGB_PTP
+ .get_ts_info = igb_ethtool_get_ts_info,
+#endif
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd3bfe8cd36..1050411e7ca 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -59,9 +59,9 @@
#endif
#include "igb.h"
-#define MAJ 3
-#define MIN 4
-#define BUILD 7
+#define MAJ 4
+#define MIN 0
+#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
- /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
- if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i211))
- numvecs = 4;
-
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
@@ -1505,11 +1500,12 @@ static void igb_configure(struct igb_adapter *adapter)
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
+ igb_reset_phy(&adapter->hw);
+
if (adapter->hw.phy.media_type == e1000_media_type_copper)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
- igb_reset_phy(&adapter->hw);
}
/**
@@ -1821,6 +1817,69 @@ static const struct net_device_ops igb_netdev_ops = {
};
/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 major, build, patch, fw_version;
+ u32 etrack_id;
+
+ hw->nvm.ops.read(hw, 5, 1, &fw_version);
+ if (adapter->hw.mac.type != e1000_i211) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
+
+ /* combo image version needs to be found */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != IGB_NVM_VER_INVALID)) {
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* Only display Option ROM if it exists and is valid */
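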
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != IGB_NVM_VER_INVALID) &&
+ (comb_verl != IGB_NVM_VER_INVALID))) {
+ major = comb_verl >> IGB_COMB_VER_SHFT;
+ build = (comb_verl << IGB_COMB_VER_SHFT) |
+ (comb_verh >> IGB_COMB_VER_SHFT);
+ patch = comb_verh & IGB_COMB_VER_MASK;
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x, %d.%d.%d",
+ (fw_version & IGB_MAJOR_MASK) >>
+ IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >>
+ IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK),
+ etrack_id, major, build, patch);
+ goto out;
+ }
+ }
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK), etrack_id);
+ } else {
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK));
+ }
+out:
+ return;
+}
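
igb_set_fw_version packs the NVM words into adapter->fw_version, which igb_get_drvinfo now copies out verbatim as the "firmware-version" line of `ethtool -i`. A standalone sketch of the first word's decode; the mask and shift values mirror the 4-bit major / 8-bit minor / 4-bit build split implied above, but treat them as illustrative since the authoritative IGB_MAJOR_MASK and friends live in the igb headers:

#include <stdio.h>
#include <stdint.h>

/* Illustrative masks/shifts mirroring IGB_MAJOR_MASK and friends. */
#define FW_MAJOR_MASK  0xF000
#define FW_MAJOR_SHIFT 12
#define FW_MINOR_MASK  0x0FF0
#define FW_MINOR_SHIFT 4
#define FW_BUILD_MASK  0x000F

int main(void)
{
	uint16_t fw_version = 0x1234;	/* example NVM word 0x5 contents */

	printf("%d.%d%d\n",
	       (fw_version & FW_MAJOR_MASK) >> FW_MAJOR_SHIFT,
	       (fw_version & FW_MINOR_MASK) >> FW_MINOR_SHIFT,
	       (fw_version & FW_BUILD_MASK));
	return 0;
}
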
+
+/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in igb_pci_tbl
@@ -2030,6 +2089,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2338,6 +2400,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u32 max_rss_queues;
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
@@ -2370,40 +2433,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
} else
adapter->vfs_allocated_count = max_vfs;
break;
- case e1000_i210:
- case e1000_i211:
- adapter->vfs_allocated_count = 0;
- break;
default:
break;
}
#endif /* CONFIG_PCI_IOV */
+
+ /* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
+ case e1000_i211:
+ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
case e1000_i210:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
- num_online_cpus());
+ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ /* I350 cannot do RSS and SR-IOV at the same time */
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ default:
+ max_rss_queues = IGB_MAX_RX_QUEUES;
break;
+ }
+
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ /* Determine if we need to pair queues. */
+ switch (hw->mac.type) {
+ case e1000_82575:
case e1000_i211:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
- num_online_cpus());
+ /* Device supports enough interrupts without queue pairing. */
break;
+ case e1000_82576:
+ /*
+ * If VFs are going to be allocated with RSS queues then we
+ * should pair the queues in order to conserve interrupts due
+ * to limited supply.
+ */
+ if ((adapter->rss_queues > 1) &&
+ (adapter->vfs_allocated_count > 6))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ /* fall through */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
default:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
- num_online_cpus());
+ /*
+ * If rss_queues > half of max_rss_queues, pair the queues in
+ * order to conserve interrupts due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
- /* i350 cannot do RSS and SR-IOV at the same time */
- if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
- adapter->rss_queues = 1;
-
- /*
- * if rss_queues > 4 or vfs are going to be allocated with rss_queues
- * then we should combine the queues into a queue pair in order to
- * conserve interrupts due to limited supply
- */
- if ((adapter->rss_queues > 4) ||
- ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kzalloc(sizeof(u32) *
@@ -4917,7 +5009,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
unsigned int device_id;
u16 thisvf_devfn;
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
switch (adapter->hw.mac.type) {
@@ -5326,7 +5418,7 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
/* generate a new mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- random_ether_addr(vf_mac);
+ eth_random_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5686,6 +5778,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ *
* returns true if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
@@ -6997,6 +7090,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
}
wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+ */
+ wr32(E1000_RTTBCNRM, 0x14);
wr32(E1000_RTTBCNRC, bcnrc_val);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d5ee7fa5072..c846ea9131a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter)
void igb_ptp_remove(struct igb_adapter *adapter)
{
- cancel_delayed_work_sync(&adapter->overflow_work);
+ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+ case e1000_i210:
+ case e1000_i350:
+ case e1000_82580:
+ case e1000_82576:
+ cancel_delayed_work_sync(&adapter->overflow_work);
+ break;
+ default:
+ return;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 8ec74b07f94..0696abfe994 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ *
* returns true if ring is completely cleaned
**/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 30a6cc42603..eea0e10ce12 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
return err;
}
-/** e1000_rlpml_set_vf - Set the maximum receive packet length
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
@@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
* e1000_rar_set_vf - set device MAC address
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
- * @index receive address array register
+ * @index: receive address array register
**/
static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
{
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index 99b69adb4a0..bf9a220f71f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -32,6 +32,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/pci_ids.h>
#include "ixgb_hw.h"
#include "ixgb_ids.h"
@@ -96,7 +97,7 @@ static u32 ixgb_mac_reset(struct ixgb_hw *hw)
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
- if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) {
+ if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
ctrl_reg = /* Enable interrupt from XFP and SerDes */
IXGB_CTRL1_GPI0_EN |
IXGB_CTRL1_SDP6_DIR |
@@ -271,7 +272,7 @@ ixgb_identify_phy(struct ixgb_hw *hw)
}
/* update phy type for sun specific board */
- if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
phy_type = ixgb_phy_type_bcm;
return phy_type;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 2a58847f46e..32c1b302d79 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -33,11 +33,6 @@
** The Device and Vendor IDs for 10 Gigabit MACs
**********************************************************************/
-#define INTEL_VENDOR_ID 0x8086
-#define INTEL_SUBVENDOR_ID 0x8086
-#define SUN_VENDOR_ID 0x108E
-#define SUN_SUBVENDOR_ID 0x108E
-
#define IXGB_DEVICE_ID_82597EX 0x1048
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
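
With the private INTEL_VENDOR_ID/SUN_SUBVENDOR_ID defines removed, the driver relies on the canonical constants from <linux/pci_ids.h>. A minimal sketch of a device table written that way, using device IDs from the remaining ixgb_ids.h entries; PCI_VDEVICE expands to PCI_VENDOR_ID_<vendor> plus the device ID with subvendor/subdevice wildcarded:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1048) },	/* IXGB_DEVICE_ID_82597EX */
	{ PCI_VDEVICE(INTEL, 0x1A48) },	/* IXGB_DEVICE_ID_82597EX_SR */
	{ }	/* required last entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
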
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5fce363d810..d05fc95befc 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -54,13 +54,13 @@ MODULE_PARM_DESC(copybreak,
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
/* required last entry */
@@ -195,7 +195,7 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
{
u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
IXGB_INT_TXDW | IXGB_INT_LSC;
- if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
val |= IXGB_INT_GPI0;
IXGB_WRITE_REG(&adapter->hw, IMS, val);
IXGB_WRITE_FLUSH(&adapter->hw);
@@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev)
#endif
/**
- * ixgb_io_error_detected() - called when PCI error is detected
- * @pdev pointer to pci device with error
- * @state pci channel state after error
+ * ixgb_io_error_detected - called when PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
*
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 0bdf06bc5c4..5fd5d04c26c 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
-
+ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7af291e236b..b9623e9ea89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -77,17 +77,18 @@
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
+#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
*/
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
@@ -113,7 +114,7 @@
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
-#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
@@ -130,7 +131,6 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
- struct pci_dev *vfdev;
};
struct vf_macvlans {
@@ -278,10 +278,16 @@ enum ixgbe_ring_f_enum {
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
- int indices;
- int mask;
+ u16 limit; /* upper limit on feature indices */
+ u16 indices; /* current value of indices */
+ u16 mask; /* Mask used for feature to ring mapping */
+ u16 offset; /* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
+#define IXGBE_82599_VMDQ_8Q_MASK 0x78
+#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
+#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
+
/*
* FCoE requires that all Rx buffers be over 2200 bytes in length. Since
* this is twice the size of a half page we need to double the page order
@@ -315,7 +321,7 @@ struct ixgbe_ring_container {
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbe_q_vector {
@@ -401,11 +407,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16
-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
#define MIN_MSIX_Q_VECTORS 1
@@ -427,35 +433,33 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
+#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
@@ -496,7 +500,7 @@ struct ixgbe_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
/* DCB parameters */
struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +511,8 @@ struct ixgbe_adapter {
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
@@ -561,6 +565,7 @@ struct ixgbe_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ int rx_hwtstamp_filter;
u32 base_incval;
u32 cycle_speed;
#endif /* CONFIG_IXGBE_PTP */
@@ -686,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
@@ -695,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
@@ -704,6 +710,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
+extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
@@ -718,6 +725,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dee64d2703f..50fc137501d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
/* Determine 1G link capabilities off of SFP+ type */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
goto out;
@@ -1023,6 +1025,9 @@ mac_reset_top:
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@@ -2104,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77ac41feb0f..90e41db3cb6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2848,6 +2848,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a Rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, the default pool is the next pool after the number of
+ * VFs advertised, not pool 0.
+ * The MPSAR table needs to be updated for the SAN_MAC RAR
+ * [hw->mac.san_mac_rar_index].
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return 0;
+}
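
The MPSAR pool-enable bitmap is 64 bits wide, split across a LO/HI pair of 32-bit registers, so the vmdq < 32 test above simply picks the register half and the bit within it. A standalone illustration of the bit placement:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vmdq = 37;	/* example pool index, 0..63 */
	uint32_t lo = 0, hi = 0;

	if (vmdq < 32)
		lo = 1u << vmdq;
	else
		hi = 1u << (vmdq - 32);

	printf("MPSAR_LO=0x%08x MPSAR_HI=0x%08x\n", lo, hi);
	return 0;
}
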
+
+/**
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
@@ -3132,7 +3157,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/**
- * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
* the EEPROM
* @hw: pointer to hardware structure
* @wwnn_prefix: the alternative WWNN prefix
@@ -3200,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
* PFVFSPOOF register array is size 8 with 8 bits assigned to
* MAC anti-spoof enables in each register array element.
*/
- for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ for (j = 0; j < pf_target_reg; j++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
- /* If not enabling anti-spoofing then done */
- if (!enable)
- return;
-
/*
* The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Reset the bit assigned to the PF
+ * emulation mode NICs. Do not set the bits assigned to the PF
*/
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
- pfvfspoof ^= (1 << pf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+ pfvfspoof &= (1 << pf_target_shift) - 1;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /*
+ * Remaining pools belong to the PF so they do not need to have
+ * anti-spoofing enabled.
+ */
+ for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
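
The rewritten anti-spoof path relies on `(1 << n) - 1` producing a mask of the n low bits, so VF pools below the PF's position keep their anti-spoof setting while the PF's own bits stay clear. For example:

#include <stdio.h>

int main(void)
{
	unsigned int pfvfspoof = 0xff;	/* all 8 anti-spoof bits set */
	unsigned int pf_target_shift = 5;

	/* keep only the VF bits below the PF's position: 0xff & 0x1f */
	pfvfspoof &= (1u << pf_target_shift) - 1;
	printf("0x%02x\n", pfvfspoof);	/* prints 0x1f */
	return 0;
}
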
/**
@@ -3325,6 +3352,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
* ixgbe_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
* @length: size of EEPROM to calculate a checksum for
+ *
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6222fdb3d3f..d813d1188c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 8bfaaee5ac5..9bc17c0cb97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -180,67 +180,83 @@ out:
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- *pfc_en = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
+ for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+ if (tc_config[tc].dcb_pfc != pfc_disabled)
+ *pfc_en |= 1 << tc;
+ }
}
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
u16 *refill)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- refill[i] = p->data_credits_refill;
- }
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
}
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
{
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- max[i] = cfg->tc_config[i].desc_credits_max;
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
}
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
u8 *bwgid)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- bwgid[i] = p->bwg_id;
- }
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
}
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 *ptype)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- ptype[i] = p->prio_type;
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ ptype[tc] = tc_config[tc].path[direction].prio_type;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+ * be set for all user priorities.
+ */
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
}
+out:
+ return tc;
}
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
{
- int i, up;
- unsigned long bitmap;
+ u8 up;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
- for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
- map[up] = i;
- }
+ for (up = 0; up < MAX_USER_PRIORITY; up++)
+ map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
}
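
ixgbe_dcb_get_tc_from_up scans from the highest configured TC down and falls back to TC 0, so every user priority resolves even when a bitmap is sparse. A standalone sketch with hypothetical up_to_tc_bitmap values (two priorities per TC) showing the resulting map:

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8
#define MAX_USER_PRIORITY 8

/* Hypothetical bitmaps: TC n owns user priorities 2n and 2n+1. */
static const unsigned char up_to_tc_bitmap[MAX_TRAFFIC_CLASS] = {
	0x03, 0x0c, 0x30, 0xc0,
};

static unsigned char tc_from_up(unsigned char up, unsigned char num_tcs)
{
	unsigned char prio_mask = 1 << up;
	unsigned char tc = num_tcs;

	if (!tc)		/* DCB not enabled: everything maps to TC 0 */
		return 0;

	for (tc--; tc; tc--)	/* highest match wins; 0 is the fallback */
		if (prio_mask & up_to_tc_bitmap[tc])
			break;
	return tc;
}

int main(void)
{
	unsigned char up;

	for (up = 0; up < MAX_USER_PRIORITY; up++)
		printf("UP %u -> TC %u\n", up, tc_from_up(up, 4));
	return 0;
}
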
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 24333b71816..1f4108ee154 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 5164a21b13c..f1e002d5fa8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- int err = 0;
- u8 prio_tc[MAX_USER_PRIORITY] = {0};
- int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
/* verify there is something to do, if not then exit */
- if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- goto out;
-
- if (state > 0) {
- err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
- } else {
- err = ixgbe_setup_tc(netdev, 0);
- }
-
- if (err)
+ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
goto out;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
-
+ err = ixgbe_setup_tc(netdev,
+ state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
out:
- return err ? 1 : 0;
+ return !!err;
}
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (err)
goto err_out;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
-
err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1ec371..4104ea25d81 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ ixgbe_link_speed supported_link;
u32 link_speed = 0;
+ bool autoneg;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->transceiver = XCVR_EXTERNAL;
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg);
-
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- ecmd->supported |= SUPPORTED_100baseT_Full;
- break;
- default:
- break;
- }
-
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.autoneg_advertised) {
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- /*
- * Default advertised modes in case
- * phy.autoneg_advertised isn't set.
- */
- ecmd->advertising |= (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full);
- if (hw->mac.type == ixgbe_mac_X540)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
-
- if (hw->phy.media_type == ixgbe_media_type_copper) {
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- } else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- }
- } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
- /* Set as FIBRE until SERDES defined in kernel */
- if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
- } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
- (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- } else {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- }
+ hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
+
+ /* set the supported link speeds */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+
+ /* set the advertised speeds */
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
} else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
+ /* default modes in case phy.autoneg_advertised isn't set */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
}
- /* Get PHY type */
+ if (autoneg) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ } else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ /* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
- /* Copper 10G-BASET */
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
break;
case ixgbe_phy_qt:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
@@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- switch (adapter->hw.phy.sfp_type) {
/* SFP+ devices, further checking needed */
+ switch (adapter->hw.phy.sfp_type) {
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
case ixgbe_sfp_type_srlr_core0:
case ixgbe_sfp_type_srlr_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
+ break;
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_unknown:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
@@ -2113,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- int num_vectors;
u16 tx_itr_param, rx_itr_param;
bool need_reset = false;
@@ -2149,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_vectors = 1;
-
- for (i = 0; i < num_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
@@ -2274,10 +2245,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
{
cmd->data = 0;
- /* if RSS is disabled then report no hashing */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return 0;
-
/* Report default options for RSS on ixgbe */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index bc07933d67d..ae73ef14fdf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -38,7 +38,7 @@
/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
- * @ddp - ptr to the ixgbe_fcoe_ddp
+ * @ddp: ptr to the ixgbe_fcoe_ddp
*
* Returns : none
*
@@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
udelay(100);
}
if (ddp->sgl)
- pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+ dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
if (ddp->pool) {
- pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+ dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ddp->pool = NULL;
}
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct ixgbe_hw *hw;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
- struct pci_pool *pool;
- unsigned int cpu;
if (!netdev || !sgl)
return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 0;
fcoe = &adapter->fcoe;
- if (!fcoe->pool) {
- e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
- return 0;
- }
-
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
ixgbe_fcoe_clear_ddp(ddp);
+
+ if (!fcoe->ddp_pool) {
+ e_warn(drv, "No ddp_pool resources allocated\n");
+ return 0;
+ }
+
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+ if (!ddp_pool->pool) {
+ e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+ goto out_noddp;
+ }
+
/* setup dma from scsi command sgl */
- dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
e_err(drv, "xid 0x%x DMA map error\n", xid);
- return 0;
+ goto out_noddp;
}
/* alloc the udl from per cpu ddp pool */
- cpu = get_cpu();
- pool = *per_cpu_ptr(fcoe->pool, cpu);
- ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
- ddp->pool = pool;
+ ddp->pool = ddp_pool->pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+ ddp_pool->noddp++;
goto out_noddp_free;
}
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
*/
if (lastsize == bufflen) {
if (j >= IXGBE_BUFFCNT_MAX) {
- *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+ ddp_pool->noddp_ext_buff++;
goto out_noddp_free;
}
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 1;
out_noddp_free:
- pci_pool_free(pool, ddp->udl, ddp->udp);
+ dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
- pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
put_cpu();
return 0;
}
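
The pci_pool_* calls above are replaced by the generic DMA pool API, which takes a struct device * instead of a struct pci_dev *. A minimal in-kernel sketch of the pool lifecycle; 'dev' stands in for &pdev->dev, and the 512/16 size and alignment are illustrative:

#include <linux/dmapool.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int ddp_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *vaddr;

	/* create a pool of small coherent buffers bounded to a page */
	pool = dma_pool_create("demo_ddp", dev, 512, 16, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	/* grab one buffer and its bus address, then return it */
	vaddr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
	if (vaddr)
		dma_pool_free(pool, vaddr, dma);

	dma_pool_destroy(pool);
	return vaddr ? 0 : -ENOMEM;
}
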
@@ -409,7 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
break;
/* unmap the sg list when FCPRSP is received */
case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
- pci_unmap_sg(adapter->pdev, ddp->sgl,
+ dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = ddp_err;
ddp->sgl = NULL;
@@ -563,44 +568,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
return 0;
}
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
- unsigned int cpu;
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
- for_each_possible_cpu(cpu) {
- pool = per_cpu_ptr(fcoe->pool, cpu);
- if (*pool)
- pci_pool_destroy(*pool);
- }
- free_percpu(fcoe->pool);
- fcoe->pool = NULL;
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ if (ddp_pool->pool)
+ dma_pool_destroy(ddp_pool->pool);
+ ddp_pool->pool = NULL;
}
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+ struct device *dev,
+ unsigned int cpu)
{
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
+ struct dma_pool *pool;
char pool_name[32];
- fcoe->pool = alloc_percpu(struct pci_pool *);
- if (!fcoe->pool)
- return;
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
- /* allocate pci pool for each cpu */
- for_each_possible_cpu(cpu) {
- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
- pool = per_cpu_ptr(fcoe->pool, cpu);
- *pool = pci_pool_create(pool_name,
- adapter->pdev, IXGBE_FCPTR_MAX,
- IXGBE_FCPTR_ALIGN, PAGE_SIZE);
- if (!*pool) {
- e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
- ixgbe_fcoe_ddp_pools_free(fcoe);
- return;
- }
- }
+ pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!pool)
+ return -ENOMEM;
+
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ ddp_pool->pool = pool;
+ ddp_pool->noddp = 0;
+ ddp_pool->noddp_ext_buff = 0;
+
+ return 0;
}
/**
@@ -613,132 +611,171 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
*/
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_q, fcoe_i;
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- unsigned int cpu;
-
- if (!fcoe->pool) {
- spin_lock_init(&fcoe->lock);
-
- ixgbe_fcoe_ddp_pools_alloc(adapter);
- if (!fcoe->pool) {
- e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
- return;
- }
-
- /* Extra buffer to be shared by all DDPs for HW work around */
- fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (fcoe->extra_ddp_buffer == NULL) {
- e_err(drv, "failed to allocated extra DDP buffer\n");
- goto out_ddp_pools;
- }
+ int i, fcoe_q, fcoe_i;
+ u32 etqf;
- fcoe->extra_ddp_buffer_dma =
- dma_map_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma)) {
- e_err(drv, "failed to map extra DDP buffer\n");
- goto out_extra_ddp_buffer;
- }
+ /* Minimal functionality for FCoE requires at least CRC offloads */
+ if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
+ return;
- /* Alloc per cpu mem to count the ddp alloc failure number */
- fcoe->pcpu_noddp = alloc_percpu(u64);
- if (!fcoe->pcpu_noddp) {
- e_err(drv, "failed to alloc noddp counter\n");
- goto out_pcpu_noddp_alloc_fail;
- }
+ /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
+ etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ etqf |= IXGBE_ETQF_POOL_ENABLE;
+ etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
- if (!fcoe->pcpu_noddp_ext_buff) {
- e_err(drv, "failed to alloc noddp extra buff cnt\n");
- goto out_pcpu_noddp_extra_buff_alloc_fail;
- }
+ /* leave registers un-configured if FCoE is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return;
- for_each_possible_cpu(cpu) {
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
- *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
- }
+ /* Use one or more Rx queues for FCoE by redirection table */
+ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+ fcoe_i = fcoe->offset + (i % fcoe->indices);
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
- /* Enable L2 eth type filter for FCoE */
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
- (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
- /* Enable L2 eth type filter for FIP */
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
- (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
- if (adapter->ring_feature[RING_F_FCOE].indices) {
- /* Use multiple rx queues for FCoE by redirection table */
- for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
- fcoe_i = f->mask + i % f->indices;
- fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
- }
- IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- } else {
- /* Use single rx queue for FCoE */
- fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
- IXGBE_ETQS_QUEUE_EN |
- (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+ /* Enable L2 EtherType filter for FIP */
+ etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ etqf |= IXGBE_ETQF_POOL_ENABLE;
+ etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
}
- /* send FIP frames to the first FCoE queue */
- fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
+
+ /* Send FIP frames to the first FCoE queue */
+ fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
- IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+ /* Configure FCoE Rx control */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+ IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
- return;
-out_pcpu_noddp_extra_buff_alloc_fail:
- free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
- dma_unmap_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
- kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
- ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
* @adapter : ixgbe adapter
*
* Cleans up outstanding ddp context resources
*
* Returns : none
*/
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
- int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ int cpu, i;
- if (!fcoe->pool)
+ /* do nothing if no DDP pools were allocated */
+ if (!fcoe->ddp_pool)
return;
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
+ for_each_possible_cpu(cpu)
+ ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
dma_unmap_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
- free_percpu(fcoe->pcpu_noddp);
- free_percpu(fcoe->pcpu_noddp_ext_buff);
kfree(fcoe->extra_ddp_buffer);
- ixgbe_fcoe_ddp_pools_free(fcoe);
+
+ fcoe->extra_ddp_buffer = NULL;
+ fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resources
+ *
+ * Returns: 0 on success or -ENOMEM on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct device *dev = &adapter->pdev->dev;
+ void *buffer;
+ dma_addr_t dma;
+ unsigned int cpu;
+
+ /* do nothing if no DDP pools were allocated */
+ if (!fcoe->ddp_pool)
+ return 0;
+
+ /* Extra buffer to be shared by all DDPs for HW work around */
+ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (!buffer) {
+ e_err(drv, "failed to allocate extra DDP buffer\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ kfree(buffer);
+ return -ENOMEM;
+ }
+
+ fcoe->extra_ddp_buffer = buffer;
+ fcoe->extra_ddp_buffer_dma = dma;
+
+ /* allocate a DMA pool for each cpu */
+ for_each_possible_cpu(cpu) {
+ int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+ if (!err)
+ continue;
+
+ e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+ ixgbe_free_fcoe_ddp_resources(adapter);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+ return -EINVAL;
+
+ fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+ if (!fcoe->ddp_pool) {
+ e_err(drv, "failed to allocate percpu DDP resources\n");
+ return -ENOMEM;
+ }
+
+ adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+ return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ adapter->netdev->fcoe_ddp_xid = 0;
+
+ if (!fcoe->ddp_pool)
+ return;
+
+ free_percpu(fcoe->ddp_pool);
+ fcoe->ddp_pool = NULL;
}
/**
@@ -751,40 +788,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ atomic_inc(&fcoe->refcnt);
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
- goto out_enable;
+ return -EINVAL;
- atomic_inc(&fcoe->refcnt);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- goto out_enable;
+ return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ /* Allocate per CPU memory to track DDP pools */
+ ixgbe_fcoe_ddp_enable(adapter);
+ /* enable FCoE and notify stack */
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
- netdev->features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FSO;
netdev->features |= NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+ netdev_features_change(netdev);
+ /* release existing queues and reallocate them */
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
- netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
- rc = 0;
-out_enable:
- return rc;
+ return 0;
}
/**
@@ -797,41 +831,35 @@ out_enable:
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
- goto out_disable;
+ if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+ return -EINVAL;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- goto out_disable;
-
- if (!atomic_dec_and_test(&fcoe->refcnt))
- goto out_disable;
+ return -EINVAL;
e_info(drv, "Disabling FCoE offload features.\n");
- netdev->features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FSO;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = 0;
- netdev_features_change(netdev);
-
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ /* Free per CPU memory to track DDP pools */
+ ixgbe_fcoe_ddp_disable(adapter);
+
+ /* disable FCoE and notify stack */
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
- ixgbe_cleanup_fcoe(adapter);
+ netdev->features &= ~NETIF_F_FCOE_MTU;
+
+ netdev_features_change(netdev);
+
+ /* release existing queues and reallocate them */
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
- rc = 0;
-out_disable:
- return rc;
+ return 0;
}
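
Note the ordering change in this enable/disable pair: every ixgbe_fcoe_enable() call now takes a reference before any capability check, and ixgbe_fcoe_disable() only proceeds once atomic_dec_and_test() drops the count to zero, so intermediate callers get -EINVAL instead of tearing the feature down. A self-contained userspace analogue of that last-user gate, using C11 atomics in place of the kernel's atomic_t (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcnt;

static void feature_enable(void)
{
	atomic_fetch_add(&refcnt, 1);	/* every enable takes a reference */
}

static int feature_disable(void)
{
	/* only the call that drops the count to zero may tear down */
	if (atomic_fetch_sub(&refcnt, 1) != 1)
		return -1;		/* mirrors the -EINVAL early return */
	printf("last user gone: tearing down\n");
	return 0;
}

int main(void)
{
	feature_enable();
	feature_enable();
	feature_disable();		/* refused: count goes 2 -> 1 */
	feature_disable();		/* tears down: count goes 1 -> 0 */
	return 0;
}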
/**
@@ -960,3 +988,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
return 0;
}
+
+/**
+ * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
+ * @adapter: pointer to the device adapter structure
+ *
+ * Return: TC that FCoE is mapped to
+ */
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+ return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 1dbed17c810..bf724da9937 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -62,19 +62,24 @@ struct ixgbe_fcoe_ddp {
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
- struct pci_pool *pool;
+ struct dma_pool *pool;
+};
+
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+ struct dma_pool *pool;
+ u64 noddp;
+ u64 noddp_ext_buff;
};
struct ixgbe_fcoe {
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
atomic_t refcnt;
spinlock_t lock;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
- unsigned char *extra_ddp_buffer;
+ void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
- u64 __percpu *pcpu_noddp;
- u64 __percpu *pcpu_noddp_ext_buff;
#ifdef CONFIG_IXGBE_DCB
u8 up;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index c377706e81a..17ecbcedd54 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -28,28 +28,83 @@
#include "ixgbe.h"
#include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
* @adapter: board private structure to initialize
*
- * Cache the descriptor ring offsets for RSS to the assigned rings.
+ * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
+ * will also try to cache the proper offsets if RSS/FCoE are enabled along
+ * with VMDq.
*
**/
-static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
+ u16 reg_idx;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
return false;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
+ /* verify we have VMDq enabled before proceeding */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return false;
+
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ /* if the index runs past this pool's TC count, move to the next pool */
+ if ((reg_idx & ~vmdq->mask) >= tcs)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+ }
+
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
+ /* if the index runs past this pool's TC count, move to the next pool */
+ if ((reg_idx & ~vmdq->mask) >= tcs)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ }
+
+#ifdef IXGBE_FCOE
+ /* nothing to do if FCoE is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return true;
+
+ /* The work is already done if the FCoE ring is shared */
+ if (fcoe->offset < tcs)
+ return true;
+
+ /* The FCoE rings exist separately, so we need to move their reg_idx */
+ if (fcoe->indices) {
+ u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
+
+ reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+ for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+ reg_idx++;
+ }
+
+ reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+ for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ reg_idx++;
+ }
+ }
+#endif /* IXGBE_FCOE */
return true;
}
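
The reg_idx walk above leans entirely on __ALIGN_MASK() to round an index up to the next VMDq pool boundary once the per-pool TC count is exhausted. A standalone trace of that arithmetic, using the 8-queues-per-pool mask value and an illustrative pool offset:

#include <stdio.h>

/* same arithmetic as the kernel's __ALIGN_MASK() */
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int vmdq_m = 0x78;	/* 8 queues per pool (82599 8Q-style mask) */
	unsigned int tcs = 4;		/* TCs in use inside each pool */
	unsigned int reg_idx = 2 * __ALIGN_MASK(1, ~vmdq_m);	/* pool offset 2 */
	unsigned int i;

	for (i = 0; i < 12; i++, reg_idx++) {
		/* jump over a pool's unused tail, as the loops above do */
		if ((reg_idx & ~vmdq_m) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq_m);
		printf("ring %2u -> reg_idx %u\n", i, reg_idx);
	}
	return 0;	/* rings land on 16..19, 24..27, 32..35 */
}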
-#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
@@ -64,42 +119,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- *tx = tc << 2;
- *rx = tc << 3;
+ /* TxQs/TC: 4 RxQs/TC: 8 */
+ *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
+ *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs > 4) {
- if (tc < 3) {
- *tx = tc << 5;
- *rx = tc << 4;
- } else if (tc < 5) {
- *tx = ((tc + 2) << 4);
- *rx = tc << 4;
- } else if (tc < num_tcs) {
- *tx = ((tc + 8) << 3);
- *rx = tc << 4;
- }
+ /*
+ * TCs : TC0/1 TC2/3 TC4-7
+ * TxQs/TC: 32 16 8
+ * RxQs/TC: 16 16 16
+ */
+ *rx = tc << 4;
+ if (tc < 3)
+ *tx = tc << 5; /* 0, 32, 64 */
+ else if (tc < 5)
+ *tx = (tc + 2) << 4; /* 80, 96 */
+ else
+ *tx = (tc + 8) << 3; /* 104, 112, 120 */
} else {
- *rx = tc << 5;
- switch (tc) {
- case 0:
- *tx = 0;
- break;
- case 1:
- *tx = 64;
- break;
- case 2:
- *tx = 96;
- break;
- case 3:
- *tx = 112;
- break;
- default:
- break;
- }
+ /*
+ * TCs : TC0 TC1 TC2/3
+ * TxQs/TC: 64 32 16
+ * RxQs/TC: 32 32 32
+ */
+ *rx = tc << 5;
+ if (tc < 2)
+ *tx = tc << 6; /* 0, 64 */
+ else
+ *tx = (tc + 4) << 4; /* 96, 112 */
}
- break;
default:
break;
}
@@ -112,106 +162,115 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
* Cache the descriptor ring offsets for DCB to the assigned rings.
*
**/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- int i, j, k;
+ unsigned int tx_idx, rx_idx;
+ int tc, offset, rss_i, i;
u8 num_tcs = netdev_get_num_tc(dev);
- if (!num_tcs)
+ /* verify we have DCB queueing enabled before proceeding */
+ if (num_tcs <= 1)
return false;
- for (i = 0, k = 0; i < num_tcs; i++) {
- unsigned int tx_s, rx_s;
- u16 count = dev->tc_to_txq[i].count;
+ rss_i = adapter->ring_feature[RING_F_RSS].indices;
- ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
- for (j = 0; j < count; j++, k++) {
- adapter->tx_ring[k]->reg_idx = tx_s + j;
- adapter->rx_ring[k]->reg_idx = rx_s + j;
- adapter->tx_ring[k]->dcb_tc = i;
- adapter->rx_ring[k]->dcb_tc = i;
+ for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+ ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
+ for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
+ adapter->tx_ring[offset + i]->reg_idx = tx_idx;
+ adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->tx_ring[offset + i]->dcb_tc = tc;
+ adapter->rx_ring[offset + i]->dcb_tc = tc;
}
}
return true;
}
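
ixgbe_cache_ring_dcb() composes the per-TC register bases from ixgbe_get_first_reg_idx() with a run of rss_i consecutive rings per TC. A standalone check of the four-or-fewer-TC 82599/X540 table documented in the comments above (32 Rx queues per TC; Tx bases 0, 64, 96, 112), with an illustrative RSS width:

#include <stdio.h>

/* 82599/X540 base offsets for num_tcs <= 4, per the driver comments */
static void first_reg_idx(unsigned int tc, unsigned int *tx, unsigned int *rx)
{
	*rx = tc << 5;			/* 0, 32, 64, 96 */
	if (tc < 2)
		*tx = tc << 6;		/* 0, 64 */
	else
		*tx = (tc + 4) << 4;	/* 96, 112 */
}

int main(void)
{
	unsigned int num_tcs = 4, rss_i = 4;	/* rss_i is illustrative */
	unsigned int tc, i, tx, rx;

	for (tc = 0; tc < num_tcs; tc++) {
		first_reg_idx(tc, &tx, &rx);
		for (i = 0; i < rss_i; i++)
			printf("tc %u ring %u: tx %3u rx %3u\n",
			       tc, i, tx + i, rx + i);
	}
	return 0;
}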
-#endif
+#endif
/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize
*
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
*
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+ */
+static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
int i;
- bool ret = false;
-
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
+ u16 reg_idx;
+
+ /* only proceed if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+ return false;
+
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+#ifdef IXGBE_FCOE
+ /* Allow first FCoE queue to be mapped as RSS */
+ if (fcoe->offset && (i > fcoe->offset))
+ break;
+#endif
+ /* if the index runs past this pool's RSS queues, move to the next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->rx_ring[i]->reg_idx = reg_idx;
}
- return ret;
-}
+#ifdef IXGBE_FCOE
+ /* FCoE uses a linear block of queues, so assign them 1:1 */
+ for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+#endif
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
-/**
- * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
- *
- */
-static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- int i;
- u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+ /* Allow first FCoE queue to be mapped as RSS */
+ if (fcoe->offset && (i > fcoe->offset))
+ break;
+#endif
+ /* if the index runs past this pool's RSS queues, move to the next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ }
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- return false;
+#ifdef IXGBE_FCOE
+ /* FCoE uses a linear block of queues, so assign them 1:1 */
+ for (; i < adapter->num_tx_queues; i++, reg_idx++)
+ adapter->tx_ring[i]->reg_idx = reg_idx;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+#endif
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
return true;
}
-#endif /* IXGBE_FCOE */
/**
- * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
- * SR-IOV doesn't use any descriptor rings but changes the default if
- * no other mapping is used.
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
*
- */
-static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+ **/
+static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
- adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
- adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
- if (adapter->num_vfs)
- return true;
- else
- return false;
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}
/**
@@ -231,186 +290,384 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
adapter->rx_ring[0]->reg_idx = 0;
adapter->tx_ring[0]->reg_idx = 0;
- if (ixgbe_cache_ring_sriov(adapter))
- return;
-
#ifdef CONFIG_IXGBE_DCB
- if (ixgbe_cache_ring_dcb(adapter))
+ if (ixgbe_cache_ring_dcb_sriov(adapter))
return;
-#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_cache_ring_fcoe(adapter))
+ if (ixgbe_cache_ring_dcb(adapter))
return;
-#endif /* IXGBE_FCOE */
- if (ixgbe_cache_ring_fdir(adapter))
+#endif
+ if (ixgbe_cache_ring_sriov(adapter))
return;
- if (ixgbe_cache_ring_rss(adapter))
- return;
+ ixgbe_cache_ring_rss(adapter);
}
-/**
- * ixgbe_set_sriov_queues: Allocate queues for IOV use
- * @adapter: board private structure to initialize
- *
- * IOV doesn't actually use anything, so just NAK the
- * request for now and let the other queue routines
- * figure out what to do.
- */
-static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
-{
- return false;
-}
+#define IXGBE_RSS_16Q_MASK 0xF
+#define IXGBE_RSS_8Q_MASK 0x7
+#define IXGBE_RSS_4Q_MASK 0x3
+#define IXGBE_RSS_2Q_MASK 0x1
+#define IXGBE_RSS_DISABLED_MASK 0x0
+#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
+ * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
* @adapter: board private structure to initialize
*
- * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate. Also assign queues based on DCB
+ * priorities and map accordingly.
*
**/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- f->mask = 0xF;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
+ int i;
+ u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+ u16 vmdq_m = 0;
+#ifdef IXGBE_FCOE
+ u16 fcoe_i = 0;
+#endif
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
+ return false;
+
+ /* verify we have VMDq enabled before proceeding */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return false;
+
+ /* Add starting offset to total pool count */
+ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+ /* 16 pools w/ 8 TC per pool */
+ if (tcs > 4) {
+ vmdq_i = min_t(u16, vmdq_i, 16);
+ vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
+ /* 32 pools w/ 4 TC per pool */
+ } else {
+ vmdq_i = min_t(u16, vmdq_i, 32);
+ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
}
- return ret;
-}
+#ifdef IXGBE_FCOE
+ /* queues in the remaining pools are available for FCoE */
+ fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+#endif
+ /* remove the starting offset from the pool count */
+ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
- f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
- f_fdir->mask = 0;
+ /* save features for later use */
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
/*
- * Use RSS in addition to Flow Director to ensure the best
- * distribution of flows across cores, even when an FDIR flow
- * isn't matched.
+ * We do not support DCB, VMDq, and RSS all simultaneously,
+ * so we will disable RSS since it is the lowest priority
*/
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- adapter->num_tx_queues = f_fdir->indices;
- adapter->num_rx_queues = f_fdir->indices;
- ret = true;
- } else {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->ring_feature[RING_F_RSS].indices = 1;
+ adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
+
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = tcs;
+
+ adapter->num_tx_queues = vmdq_i * tcs;
+ adapter->num_rx_queues = vmdq_i * tcs;
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *fcoe;
+
+ fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+ /* limit ourselves based on feature limits */
+ fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+ if (fcoe_i) {
+ /* alloc queues for FCoE separately */
+ fcoe->indices = fcoe_i;
+ fcoe->offset = vmdq_i * tcs;
+
+ /* add queues to adapter */
+ adapter->num_tx_queues += fcoe_i;
+ adapter->num_rx_queues += fcoe_i;
+ } else if (tcs > 1) {
+ /* use the queue belonging to the FCoE TC */
+ fcoe->indices = 1;
+ fcoe->offset = ixgbe_fcoe_get_tc(adapter);
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+
+ fcoe->indices = 0;
+ fcoe->offset = 0;
+ }
}
- return ret;
+
+#endif /* IXGBE_FCOE */
+ /* configure TC to queue mapping */
+ for (i = 0; i < tcs; i++)
+ netdev_set_tc_queue(adapter->netdev, i, 1, i);
+
+ return true;
}
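
The queue budget here is plain pool arithmetic: clamp the pool count to the VMDq mode, derive queues-per-pool from the mask, and hand the queues of any leftover pools to FCoE. A standalone worked example with made-up limit and offset inputs (the 0x78/0x7C mask values correspond to the 8-queue and 4-queue VMDq modes):

#include <stdio.h>

#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned int vmdq_limit = 10, vmdq_offset = 1, tcs = 8;	/* inputs */
	unsigned int vmdq_i = vmdq_limit + vmdq_offset;
	unsigned int vmdq_m = (tcs > 4) ? 0x78 : 0x7C;
	unsigned int qpp, fcoe_i;

	if (tcs > 4 && vmdq_i > 16)
		vmdq_i = 16;			/* 16 pools w/ 8 TCs per pool */
	else if (tcs <= 4 && vmdq_i > 32)
		vmdq_i = 32;			/* 32 pools w/ 4 TCs per pool */

	qpp = __ALIGN_MASK(1, ~vmdq_m);		/* queues per pool: 8 here */
	fcoe_i = (128 / qpp) - vmdq_i;		/* leftover pools feed FCoE */
	vmdq_i -= vmdq_offset;			/* remove the starting offset */

	printf("pools=%u queues/pool=%u rx=tx=%u fcoe budget=%u\n",
	       vmdq_i, qpp, vmdq_i * tcs, fcoe_i);	/* 10, 8, 80, 5 */
	return 0;
}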
+static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_ring_feature *f;
+ int rss_i, rss_m, i;
+ int tcs;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ tcs = netdev_get_num_tc(dev);
+
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
+ return false;
+
+ /* determine the upper limit for our current DCB mode */
+ rss_i = dev->num_tx_queues / tcs;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ rss_i = min_t(u16, rss_i, 4);
+ rss_m = IXGBE_RSS_4Q_MASK;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ rss_i = min_t(u16, rss_i, 8);
+ rss_m = IXGBE_RSS_8Q_MASK;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ rss_i = min_t(u16, rss_i, 16);
+ rss_m = IXGBE_RSS_16Q_MASK;
+ }
+
+ /* set RSS mask and indices */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = min_t(int, rss_i, f->limit);
+ f->indices = rss_i;
+ f->mask = rss_m;
+
+ /* disable ATR as it is not supported when multiple TCs are enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
#ifdef IXGBE_FCOE
+ /* FCoE-enabled queues require special configuration indexed
+ * by feature-specific indices and offset. Here we map FCoE
+ * indices onto the DCB queue pairs, allowing FCoE to own
+ * the configuration later.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ u8 tc = ixgbe_fcoe_get_tc(adapter);
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+ f->indices = min_t(u16, rss_i, f->limit);
+ f->offset = rss_i * tc;
+ }
+
+#endif /* IXGBE_FCOE */
+ for (i = 0; i < tcs; i++)
+ netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+ adapter->num_tx_queues = rss_i * tcs;
+ adapter->num_rx_queues = rss_i * tcs;
+
+ return true;
+}
+
+#endif
/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
* @adapter: board private structure to initialize
*
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
+ * When SR-IOV (Single Root I/O Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate. If RSS is available, then also try and
+ * enable RSS and map accordingly.
*
**/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+ u16 vmdq_m = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = IXGBE_RSS_DISABLED_MASK;
+#ifdef IXGBE_FCOE
+ u16 fcoe_i = 0;
+#endif
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ /* only proceed if SR-IOV is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
- f->indices = min_t(int, num_online_cpus(), f->indices);
+ /* Add starting offset to total pool count */
+ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- e_info(probe, "FCoE enabled with RSS\n");
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
+ /* 64 pool mode with 2 queues per pool */
+ if ((vmdq_i > 32) || (rss_i < 4)) {
+ vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
+ rss_m = IXGBE_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
+ rss_m = IXGBE_RSS_4Q_MASK;
+ rss_i = 4;
}
- /* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
+#ifdef IXGBE_FCOE
+ /* queues in the remaining pools are available for FCoE */
+ fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
+
+#endif
+ /* remove the starting offset from the pool count */
+ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ adapter->ring_feature[RING_F_RSS].indices = rss_i;
+ adapter->ring_feature[RING_F_RSS].mask = rss_m;
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = rss_i;
+
+ adapter->num_rx_queues = vmdq_i * rss_i;
+ adapter->num_tx_queues = vmdq_i * rss_i;
+
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+#ifdef IXGBE_FCOE
+ /*
+ * FCoE can use rings from adjacent buffers to allow RSS-like
+ * behavior. To account for this we need to add the
+ * FCoE indices to the total ring count.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *fcoe;
+
+ fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+ /* limit ourselves based on feature limits */
+ fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+ if (vmdq_i > 1 && fcoe_i) {
+ /* reserve no more than the number of CPUs */
+ fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+
+ /* alloc queues for FCoE separately */
+ fcoe->indices = fcoe_i;
+ fcoe->offset = vmdq_i * rss_i;
+ } else {
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
+ fcoe->offset = fcoe_i - fcoe->indices;
+
+ fcoe_i -= rss_i;
+ }
+
+ /* add queues to adapter */
+ adapter->num_tx_queues += fcoe_i;
+ adapter->num_rx_queues += fcoe_i;
+ }
+
+#endif
return true;
}
-#endif /* IXGBE_FCOE */
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+/**
+ * ixgbe_set_rss_queues - Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
- int per_tc_q, q, i, offset = 0;
- struct net_device *dev = adapter->netdev;
- int tcs = netdev_get_num_tc(dev);
+ struct ixgbe_ring_feature *f;
+ u16 rss_i;
- if (!tcs)
- return false;
+ /* set the mask for the 16-queue limit of RSS */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = f->limit;
- /* Map queue offset and counts onto allocated tx queues */
- per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
- q = min_t(int, num_online_cpus(), per_tc_q);
+ f->indices = rss_i;
+ f->mask = IXGBE_RSS_16Q_MASK;
- for (i = 0; i < tcs; i++) {
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
- }
+ /* disable ATR by default, it will be configured below */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ /*
+ * Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ f = &adapter->ring_feature[RING_F_FDIR];
- adapter->num_tx_queues = q * tcs;
- adapter->num_rx_queues = q * tcs;
+ f->indices = min_t(u16, num_online_cpus(), f->limit);
+ rss_i = max_t(u16, rss_i, f->indices);
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
#ifdef IXGBE_FCOE
- /* FCoE enabled queues require special configuration indexed
- * by feature specific indices and mask. Here we map FCoE
- * indices onto the DCB queue pairs allowing FCoE to own
- * configuration later.
+ /*
+ * FCoE can exist on the same rings as standard network traffic;
+ * however, it is preferred to avoid that if possible. To get the
+ * best performance we allocate as many FCoE queues as we can and
+ * place them at the end of the ring array, avoiding shared queues
+ * with standard RSS on systems with 24 or more CPUs.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- u8 prio_tc[MAX_USER_PRIORITY] = {0};
- int tc;
- struct ixgbe_ring_feature *f =
- &adapter->ring_feature[RING_F_FCOE];
-
- ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
- tc = prio_tc[adapter->fcoe.up];
- f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
+ struct net_device *dev = adapter->netdev;
+ u16 fcoe_i;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ f->indices = min_t(u16, fcoe_i, f->limit);
+ f->offset = fcoe_i - f->indices;
+ rss_i = max_t(u16, fcoe_i, rss_i);
}
-#endif
+
+#endif /* IXGBE_FCOE */
+ adapter->num_rx_queues = rss_i;
+ adapter->num_tx_queues = rss_i;
return true;
}
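
The FCoE reservation above is pure min/max bookkeeping: grow the budget to cover both features, cap it by the CPU and Tx-ring counts, then peel f->indices queues off the tail of the range starting at f->offset. A standalone sketch with illustrative limits, showing why 24 or more CPUs give FCoE a fully private block:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int rss_i = 16, fcoe_limit = 8, ncpus = 24, num_tx = 64;
	unsigned int fcoe_i, indices, offset;

	/* merge FCoE queues with RSS queues, then cap by CPUs and Tx rings */
	fcoe_i = min_u(fcoe_limit + rss_i, ncpus);
	fcoe_i = min_u(fcoe_i, num_tx);

	/* reserve the tail of the range for FCoE alone */
	indices = min_u(fcoe_i, fcoe_limit);
	offset = fcoe_i - indices;
	rss_i = max_u(fcoe_i, rss_i);

	printf("rss_i=%u fcoe indices=%u offset=%u (rings %u..%u)\n",
	       rss_i, indices, offset, offset, offset + indices - 1);
	return 0;	/* 24 rings total; FCoE owns rings 16..23 */
}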
-#endif
/**
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * ixgbe_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -420,7 +677,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
* fallthrough conditions.
*
**/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
@@ -428,38 +685,18 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
- if (ixgbe_set_sriov_queues(adapter))
- goto done;
-
#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_set_dcb_sriov_queues(adapter))
+ return;
+
if (ixgbe_set_dcb_queues(adapter))
- goto done;
+ return;
#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_set_fcoe_queues(adapter))
- goto done;
-
-#endif /* IXGBE_FCOE */
- if (ixgbe_set_fdir_queues(adapter))
- goto done;
-
- if (ixgbe_set_rss_queues(adapter))
- goto done;
-
- /* fallback to base case */
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
-
-done:
- if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
- (adapter->netdev->reg_state == NETREG_UNREGISTERING))
- return 0;
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
- /* Notify the stack of the (possibly) reduced queue counts. */
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- return netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
+ ixgbe_set_rss_queues(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -507,8 +744,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
- adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ vectors -= NON_Q_VECTORS;
+ adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
}
}
@@ -632,8 +869,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((rxr_idx >= f->mask) &&
- (rxr_idx < f->mask + f->indices))
+ if ((rxr_idx >= f->offset) &&
+ (rxr_idx < f->offset + f->indices))
set_bit(__IXGBE_RX_FCOE, &ring->state);
}
@@ -695,7 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int q_vectors = adapter->num_q_vectors;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +976,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
return 0;
err_out:
- while (v_idx) {
- v_idx--;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
- }
return -ENOMEM;
}
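
The rewritten error path zeroes the queue and vector counts first, then unwinds with `while (v_idx--)`, which visits exactly the vectors 0..v_idx-1 that were successfully allocated, in reverse order. A tiny standalone demonstration of the idiom:

#include <stdio.h>

int main(void)
{
	int v_idx = 3;	/* vectors 0..2 existed when the failure hit */

	while (v_idx--)
		printf("freeing q_vector %d\n", v_idx);	/* 2, then 1, then 0 */
	return 0;
}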
@@ -757,14 +996,13 @@ err_out:
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
- int v_idx, q_vectors;
+ int v_idx = adapter->num_q_vectors;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- q_vectors = 1;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
- for (v_idx = 0; v_idx < q_vectors; v_idx++)
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
}
@@ -788,11 +1026,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
- int vector, v_budget;
+ int vector, v_budget, err;
/*
* It's easy to be greedy for MSI-X vectors, but it really
@@ -825,38 +1062,41 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
ixgbe_acquire_msix_vectors(adapter, v_budget);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- goto out;
+ return;
}
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- e_err(probe,
- "ATR is not supported while multiple "
- "queues are disabled. Disabling Flow Director\n");
+ /* disable DCB if number of TCs exceeds 1 */
+ if (netdev_get_num_tc(adapter->netdev) > 1) {
+ e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
+ netdev_reset_tc(adapter->netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
}
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 0;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+
+ /* disable SR-IOV */
+ ixgbe_disable_sriov(adapter);
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
+ /* disable RSS */
+ adapter->ring_feature[RING_F_RSS].limit = 1;
+
+ ixgbe_set_num_queues(adapter);
+ adapter->num_q_vectors = 1;
err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
- } else {
+ if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, "
"falling back to legacy. Error: %d\n", err);
- /* reset err */
- err = 0;
+ return;
}
-
-out:
- return err;
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/**
@@ -874,15 +1114,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
int err;
/* Number of supported queues */
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
+ ixgbe_set_num_queues(adapter);
- err = ixgbe_set_interrupt_capability(adapter);
- if (err) {
- e_dev_err("Unable to setup interrupt capabilities\n");
- goto err_set_interrupt;
- }
+ /* Set interrupt mode */
+ ixgbe_set_interrupt_capability(adapter);
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
@@ -902,7 +1137,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e242104ab47..3b6784cf134 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
-/*
+/**
* ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP
- if (unlikely(tx_buffer->tx_flags &
- IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector,
- tx_buffer->skb);
-
+ if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
+ ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
#endif
+
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -995,7 +993,6 @@ out_no_update:
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
- int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1004,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
-
- for (i = 0; i < num_q_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
ixgbe_update_dca(adapter->q_vector[i]);
}
@@ -1399,8 +1391,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
#endif
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
@@ -1526,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = skb_frag_size(frag);
- if (pull_len > 256)
- pull_len = ixgbe_get_headlen(va, pull_len);
+ if (pull_len > IXGBE_RX_HDR_SIZE)
+ pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -1834,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int q_vectors, v_idx;
+ int v_idx;
u32 mask;
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1849,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
@@ -2413,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int vector, err;
int ri = 0, ti = 0;
- for (vector = 0; vector < q_vectors; vector++) {
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2572,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i, q_vectors;
+ int vector;
- q_vectors = adapter->num_msix_vectors;
- i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, adapter);
- i--;
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ free_irq(adapter->pdev->irq, adapter);
+ return;
+ }
- for (; i >= 0; i--) {
- /* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rx.ring &&
- !adapter->q_vector[i]->tx.ring)
- continue;
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(adapter->msix_entries[i].vector,
- NULL);
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
- free_irq(adapter->msix_entries[i].vector,
- adapter->q_vector[i]);
- }
- } else {
- free_irq(adapter->pdev->irq, adapter);
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(entry->vector, NULL);
+
+ free_irq(entry->vector, q_vector);
}
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
}
/**
@@ -2619,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i;
- for (i = 0; i < adapter->num_msix_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
+ int vector;
+
+ for (vector = 0; vector < adapter->num_q_vectors; vector++)
+ synchronize_irq(adapter->msix_entries[vector].vector);
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
@@ -2699,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
- adapter->atr_sample_rate) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ring->atr_sample_rate = adapter->atr_sample_rate;
ring->atr_count = 0;
set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
@@ -2730,8 +2718,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 rttdcs;
- u32 reg;
+ u32 rttdcs, mtqc;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2743,28 +2730,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/* set transmit pool layout */
- switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- case (IXGBE_FLAG_SRIOV_ENABLED):
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
- break;
- default:
- if (!tcs)
- reg = IXGBE_MTQC_64Q_1PB;
- else if (tcs <= 4)
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ mtqc = IXGBE_MTQC_VT_ENA;
+ if (tcs > 4)
+ mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else if (tcs > 1)
+ mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ mtqc |= IXGBE_MTQC_32VF;
else
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ mtqc |= IXGBE_MTQC_64VF;
+ } else {
+ if (tcs > 4)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else if (tcs > 1)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ }
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
- /* Enable Security TX Buffer IFG for multiple pb */
- if (tcs) {
- reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
- reg |= IXGBE_SECTX_DCB;
- IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
- }
- break;
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ sectx |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
}
/* re-enable the arbiter */
@@ -2858,40 +2849,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
u8 reg_idx = rx_ring->reg_idx;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB: {
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
- const int mask = feature[RING_F_RSS].mask;
- reg_idx = reg_idx & mask;
- }
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- default:
- break;
- }
-
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u16 mask = adapter->ring_feature[RING_F_RSS].mask;
- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- if (adapter->num_vfs)
- srrctl |= IXGBE_SRRCTL_DROP_EN;
+ /*
+ * if VMDq is not active we must program one srrctl register
+ * per RSS queue since we have enabled RDRXCTL.MVMEN
+ */
+ reg_idx &= mask;
+ }
- srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK;
+ /* configure header buffer length, needed for RSC */
+ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ /* configure the packet buffer length */
#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
+
+ /* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2903,11 +2888,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
u32 mrqc = 0, reta = 0;
u32 rxcsum;
int i, j;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
- int maxq = adapter->ring_feature[RING_F_RSS].indices;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- if (tcs)
- maxq = min(maxq, adapter->num_tx_queues / tcs);
+ /*
+ * Program table for at least 2 queues w/ SR-IOV so that VFs can
+ * make full use of any rings they may have. We will use the
+ * PSRTYPE register to control how many rings we use within the PF.
+ */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+ rss_i = 2;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -2915,7 +2904,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == maxq)
+ if (j == rss_i)
j = 0;
/* reta = 4-byte sliding window of
* 0x00..(indices-1)(indices-1)00..etc. */
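
Each RETA register holds four 8-bit entries, so the loop shifts a byte in per iteration and flushes one 32-bit register every fourth entry, cycling j through 0..rss_i-1 as the "sliding window" comment describes. A standalone sketch of that packing; since the hunk elides the loop body, the 0x11 nibble duplication below (matching the 0x00..(indices-1)(indices-1) pattern in the comment) is an illustration, not necessarily the exact driver code:

#include <stdio.h>

int main(void)
{
	unsigned int rss_i = 4;		/* queues participating in RSS */
	unsigned int reta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == rss_i)
			j = 0;
		/* one byte per entry; 0x11 repeats the index in both nibbles */
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)	/* a full register every 4 entries */
			printf("RETA[%2u] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}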
@@ -2929,35 +2918,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
rxcsum |= IXGBE_RXCSUM_PCSD;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
- (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- mrqc = IXGBE_MRQC_RSSEN;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ if (adapter->ring_feature[RING_F_RSS].mask)
+ mrqc = IXGBE_MRQC_RSSEN;
} else {
- int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
- | IXGBE_FLAG_SRIOV_ENABLED);
-
- switch (mask) {
- case (IXGBE_FLAG_RSS_ENABLED):
- if (!tcs)
- mrqc = IXGBE_MRQC_RSSEN;
- else if (tcs <= 4)
- mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ if (tcs > 4)
+ mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
+ else if (tcs > 1)
+ mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
+ else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
+ mrqc = IXGBE_MRQC_VMDQRSS64EN;
+ } else {
+ if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
- break;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- mrqc = IXGBE_MRQC_VMDQEN;
- break;
- default:
- break;
+ else if (tcs > 1)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RSSEN;
}
}
/* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+ IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6 |
+ IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -3108,6 +3098,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int rss_i = adapter->ring_feature[RING_F_RSS].indices;
int p;
/* PSRTYPE must be initialized in non 82598 adapters */
@@ -3120,58 +3111,69 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
return;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
- psrtype |= (adapter->num_rx_queues_per_pool << 29);
+ if (rss_i > 3)
+ psrtype |= 2 << 29;
+ else if (rss_i > 1)
+ psrtype |= 1 << 29;
for (p = 0; p < adapter->num_rx_pools; p++)
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr_ext;
- u32 vt_reg_bits;
u32 reg_offset, vf_shift;
- u32 vmdctl;
+ u32 gcr_ext, vmdctl;
int i;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return;
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
- vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
+ vmdctl |= IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
+ vf_shift = VMDQ_P(0) % 32;
+ reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
- hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
/*
* Set up VF register offsets for selected VT Mode,
* i.e. 32 or 64 VFs for SR-IOV
*/
- gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
- gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+ case IXGBE_82599_VMDQ_8Q_MASK:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
+ break;
+ case IXGBE_82599_VMDQ_4Q_MASK:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
+ break;
+ default:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
+ break;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
/* enable Tx loopback for VF/PF communication */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
/* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw,
- (adapter->num_vfs != 0),
+ hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
adapter->num_vfs);
/* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
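
The VFRE/VFTE writes in this hunk enable the PF's pool and every pool above it. `(~0) << vf_shift` raises the PF's bit and all higher bits in its own 32-pool register, while writing `reg_offset - 1` to the other register evaluates, in unsigned arithmetic, to all ones when the PF pool sits in the low half (so the unused upper pools stay with the PF) and to zero when it sits in the high half. A standalone check of both cases with illustrative pool numbers:

#include <stdio.h>

static void pf_pool_masks(unsigned int pf_pool)
{
	unsigned int vf_shift = pf_pool % 32;
	unsigned int reg_offset = (pf_pool >= 32) ? 1 : 0;
	unsigned int own = ~0u << vf_shift;	/* PF's pool and everything above */
	unsigned int other = reg_offset - 1;	/* all ones iff PF is in low half */

	printf("PF pool %2u: VFRE[%u]=0x%08x VFRE[%u]=0x%08x\n",
	       pf_pool, reg_offset, own, reg_offset ^ 1, other);
}

int main(void)
{
	pf_pool_masks(8);	/* 0xffffff00 and 0xffffffff: pools 8..63 */
	pf_pool_masks(40);	/* 0xffffff00 and 0x00000000: pools 40..63 */
	return 0;
}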
@@ -3307,10 +3309,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3320,10 +3321,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int pool_ndx = adapter->num_vfs;
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -3441,15 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- unsigned int vfn = adapter->num_vfs;
- unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
+ unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
+ /* In SR-IOV mode significantly fewer RAR entries are available */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
+
/* return ENOMEM indicating insufficient memory for addresses */
if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (!netdev_uc_empty(netdev) && rar_entries) {
+ if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
/* return error if we do not support writing to RAR table */
if (!hw->mac.ops.set_rar)
@@ -3459,7 +3462,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
if (!rar_entries)
break;
hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- vfn, IXGBE_RAH_AV);
+ VMDQ_P(0), IXGBE_RAH_AV);
count++;
}
}
@@ -3533,12 +3536,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
vmolr |= IXGBE_VMOLR_ROPE;
}
- if (adapter->num_vfs) {
+ if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
- vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
}
/* This is useful for sniffing bad packets. */
@@ -3564,37 +3569,21 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_enable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_enable(&adapter->q_vector[q_idx]->napi);
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_disable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_disable(&adapter->q_vector[q_idx]->napi);
}
#ifdef CONFIG_IXGBE_DCB
-/*
+/**
* ixgbe_configure_dcb - Configure DCB hardware
* @adapter: ixgbe adapter struct
*
@@ -3641,19 +3630,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Enable RSS Hash per TC */
if (hw->mac.type != ixgbe_mac_82598EB) {
- int i;
- u32 reg = 0;
-
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- u8 msb = 0;
- u8 cnt = adapter->netdev->tc_to_txq[i].count;
-
- while (cnt >>= 1)
- msb++;
+ u32 msb = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
- reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+ while (rss_i) {
+ msb++;
+ rss_i >>= 1;
}
- IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+
+ /* write msb to all 8 TCs in one write */
+ IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
}
}
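
The RQTC rewrite computes the bit width of rss_i - 1 once and fans it out to all eight 4-bit TC fields in a single write by multiplying with 0x11111111, replacing the old per-TC shift-and-or loop. A standalone check of the replication:

#include <stdio.h>

int main(void)
{
	unsigned int rss_i = 8;		/* RSS indices per TC */
	unsigned int n = rss_i - 1;
	unsigned int msb = 0;

	while (n) {			/* bits needed to address rss_i queues */
		msb++;
		n >>= 1;
	}
	/* each nibble of RQTC receives the same msb value */
	printf("rss_i=%u msb=%u RQTC=0x%08x\n", rss_i, msb, msb * 0x11111111u);
	return 0;			/* prints msb=3, RQTC=0x33333333 */
}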
#endif
@@ -3661,11 +3647,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20
-/*
+/**
* ixgbe_hpbthresh - calculate high water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
@@ -3679,18 +3665,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if (dev->features & NETIF_F_FCOE_MTU) {
- int fcoe_pb = 0;
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == ixgbe_fcoe_get_tc(adapter)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-#ifdef CONFIG_IXGBE_DCB
- fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
-
-#endif
- if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
- tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
- }
#endif
-
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -3725,11 +3705,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
return marker;
}
-/*
+/**
* ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
{
@@ -3830,12 +3810,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -3865,6 +3839,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_virtualization(adapter);
+#ifdef IXGBE_FCOE
+ /* configure FCoE L2 filters, redirection table, and Rx control */
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
}
@@ -3973,7 +3952,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- gpie |= IXGBE_GPIE_VTMODE_64;
+
+ switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+ case IXGBE_82599_VMDQ_8Q_MASK:
+ gpie |= IXGBE_GPIE_VTMODE_16;
+ break;
+ case IXGBE_82599_VMDQ_4Q_MASK:
+ gpie |= IXGBE_GPIE_VTMODE_32;
+ break;
+ default:
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ break;
+ }
}
/* Enable Thermal over heat sensor interrupt */
@@ -4131,8 +4121,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
- IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+
+ /* update SAN MAC vmdq pool selection */
+ if (hw->mac.san_mac_rar_index)
+ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
/**
@@ -4413,32 +4406,29 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* Set capability flags */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
- adapter->ring_feature[RING_F_RSS].indices = rss;
- adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ adapter->ring_feature[RING_F_RSS].limit = rss;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
/* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].indices =
+ adapter->ring_feature[RING_F_FDIR].limit =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -4449,6 +4439,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
break;
}
+#ifdef IXGBE_FCOE
+ /* FCoE support exists, always init the FCoE lock */
+ spin_lock_init(&adapter->fcoe.lock);
+
+#endif
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
@@ -4497,6 +4492,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
+#ifdef CONFIG_PCI_IOV
+ /* assign number of SR-IOV VFs */
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+
+#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -4588,10 +4589,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
+
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
- break;
+ goto err_setup_tx;
}
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
return err;
}
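
The new error path uses the standard rewind idiom: on a failure at index i, walk i back down and release only what was successfully set up. A self-contained sketch of the pattern (helpers and the forced failure are for demonstration only):

#include <stdio.h>

static int setup_one(int i)  { return i == 2 ? -1 : 0; } /* force a failure at i == 2 */
static void free_one(int i)  { printf("freed %d\n", i); }

static int setup_all(int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = setup_one(i);
		if (err)
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* i points at the failed entry; unwind only entries 0..i-1 */
	while (i--)
		free_one(i);
	return err;
}

int main(void)
{
	return setup_all(4) ? 1 : 0; /* frees 1 then 0, reports failure */
}
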
@@ -4666,10 +4673,20 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
+
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
- break;
+ goto err_setup_rx;
}
+#ifdef IXGBE_FCOE
+ err = ixgbe_setup_fcoe_ddp_resources(adapter);
+ if (!err)
+#endif
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
return err;
}
@@ -4744,6 +4761,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i;
+#ifdef IXGBE_FCOE
+ ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -4825,15 +4846,31 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev,
+ adapter->num_rx_pools > 1 ? 1 :
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev,
+ adapter->num_rx_pools > 1 ? 1 :
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
ixgbe_up_complete(adapter);
return 0;
+err_set_queues:
+ ixgbe_free_irq(adapter);
err_req_irq:
-err_setup_rx:
ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
ixgbe_reset(adapter);
return err;
@@ -4891,23 +4928,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
- rtnl_lock();
- err = ixgbe_init_interrupt_scheme(adapter);
- rtnl_unlock();
- if (err) {
- e_dev_err("Cannot initialize interrupts for device\n");
- return err;
- }
-
ixgbe_reset(adapter);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
- if (netif_running(netdev)) {
+ rtnl_lock();
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- if (err)
- return err;
- }
+
+ rtnl_unlock();
+
+ if (err)
+ return err;
netif_device_attach(netdev);
@@ -5043,11 +5076,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5178,17 +5206,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
/* Add up per cpu counters for total ddp alloc fail */
- if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+ if (adapter->fcoe.ddp_pool) {
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
+ unsigned int cpu;
+ u64 noddp = 0, noddp_ext_buff = 0;
for_each_possible_cpu(cpu) {
- fcoe_noddp_counts_sum +=
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
- fcoe_noddp_ext_buff_counts_sum +=
- *per_cpu_ptr(fcoe->
- pcpu_noddp_ext_buff, cpu);
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ noddp += ddp_pool->noddp;
+ noddp_ext_buff += ddp_pool->noddp_ext_buff;
}
+ hwstats->fcoe_noddp = noddp;
+ hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
}
- hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
- hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
#endif /* IXGBE_FCOE */
break;
default:
@@ -5246,7 +5276,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/**
* ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
@@ -5282,7 +5312,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the
@@ -5316,7 +5346,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
} else {
/* get one bit for every active tx/rx interrupt vector */
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
eics |= ((u64)1 << i);
@@ -5330,8 +5360,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
- * @link_speed - pointer to a u32 to store the link_speed
+ * @adapter: pointer to the device adapter structure
+ * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -5374,7 +5404,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
@@ -5429,12 +5459,15 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+
+ /* ping all the active vfs to let them know link has changed */
+ ixgbe_ping_all_vfs(adapter);
}
/**
* ixgbe_watchdog_link_is_down - update netif_carrier status and
* print link down message
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
**/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
@@ -5458,11 +5491,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev);
+
+ /* ping all the active vfs to let them know link has changed */
+ ixgbe_ping_all_vfs(adapter);
}
/**
* ixgbe_watchdog_flush_tx - flush queues on link down
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
@@ -5511,7 +5547,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_subtask - check and bring link up
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
@@ -5535,7 +5571,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_sfp_detection_subtask - poll for SFP+ cable
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
@@ -5602,7 +5638,7 @@ sfp_out:
/**
* ixgbe_sfp_link_config_subtask - set up link SFP after module install
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
@@ -6233,8 +6269,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- txq += adapter->ring_feature[RING_F_FCOE].mask;
+ struct ixgbe_ring_feature *f;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ while (txq >= f->indices)
+ txq -= f->indices;
+ txq += adapter->ring_feature[RING_F_FCOE].offset;
+
return txq;
}
#endif
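
The loop in the FCoE branch above is an open-coded modulo: it wraps the stack-chosen queue into the FCoE ring count, then rebases it at the feature's ring offset. A standalone sketch with illustrative numbers:

#include <stdio.h>

/* Map an arbitrary queue index into [offset, offset + indices). */
static unsigned int fcoe_txq(unsigned int txq,
			     unsigned int indices, unsigned int offset)
{
	while (txq >= indices)
		txq -= indices;
	return txq + offset;
}

int main(void)
{
	/* e.g. 8 FCoE rings starting at ring 64: 77 % 8 = 5 -> 69 */
	printf("%u\n", fcoe_txq(77, 8, 64));
	return 0;
}
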
@@ -6348,7 +6390,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
if ((protocol == __constant_htons(ETH_P_FCOE)) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -6389,17 +6431,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/*
* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
}
@@ -6427,8 +6464,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
- IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
return 0;
}
@@ -6485,12 +6521,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_mac_info *mac = &adapter->hw.mac;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (is_valid_ether_addr(mac->san_addr)) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
rtnl_lock();
- err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
+
+ /* update SAN MAC vmdq pool selection */
+ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
return err;
}
@@ -6533,11 +6572,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- for (i = 0; i < num_q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- ixgbe_msix_clean_rings(0, q_vector);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
}
@@ -6594,8 +6630,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
#ifdef CONFIG_IXGBE_DCB
-/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
- * #adapter: pointer to ixgbe_adapter
+/**
+ * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
* @tc: number of traffic classes currently enabled
*
* Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
@@ -6630,8 +6667,33 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return;
}
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
+/**
+ * ixgbe_set_prio_tc_map - Configure netdev prio tc map
+ * @adapter: Pointer to adapter struct
+ *
+ * Populate the netdev user priority to tc map
+ */
+static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+ struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+ u8 prio;
+
+ for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
+ u8 tc = 0;
+
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
+ tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
+ else if (ets)
+ tc = ets->prio_tc[prio];
+
+ netdev_set_prio_tc_map(dev, prio, tc);
+ }
+}
+
+/**
+ * ixgbe_setup_tc - configure net_device for multiple traffic classes
*
* @netdev: net device to configure
* @tc: number of traffic classes to enable
@@ -6641,17 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Multiple traffic classes requires multiple queues */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- e_err(drv, "Enable failed, needs MSI-X\n");
- return -EINVAL;
- }
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- e_err(drv, "Enable failed, SR-IOV enabled\n");
- return -EINVAL;
- }
-
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&
@@ -6668,8 +6719,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) {
netdev_set_num_tc(dev, tc);
+ ixgbe_set_prio_tc_map(adapter);
+
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
@@ -6677,11 +6729,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
}
} else {
netdev_reset_tc(dev);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -6711,10 +6763,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* return error if RXHASH is being enabled when RSS is not supported */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- features &= ~NETIF_F_RXHASH;
-
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
@@ -6754,20 +6802,40 @@ static int ixgbe_set_features(struct net_device *netdev,
* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
*/
- if (!(features & NETIF_F_NTUPLE)) {
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- /* turn off Flow Director, set ATR and reset */
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- }
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
/* turn off ATR, enable perfect filters and reset */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ need_reset = true;
+
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ need_reset = true;
+
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+ /* We cannot enable ATR if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ break;
+
+ /* We cannot enable ATR if we have 2 or more traffic classes */
+ if (netdev_get_num_tc(netdev) > 1)
+ break;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ /* A sample rate of 0 indicates ATR disabled */
+ if (!adapter->atr_sample_rate)
+ break;
+
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
}
if (features & NETIF_F_HW_VLAN_RX)
@@ -6791,7 +6859,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
u16 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
if (ndm->ndm_state & NUD_PERMANENT) {
pr_info("%s: FDB only supports static addresses\n",
@@ -6799,13 +6870,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
return -EINVAL;
}
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
+ if (is_unicast_ether_addr(addr)) {
+ u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
else
- err = -EINVAL;
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
}
/* Only return duplicate errors if NLM_F_EXCL is set */
@@ -6894,26 +6969,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
};
-static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
-{
-#ifdef CONFIG_PCI_IOV
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- /* The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
- ixgbe_enable_sriov(adapter, ii);
-#endif /* CONFIG_PCI_IOV */
-}
-
/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
@@ -6940,6 +6995,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
if (hw->bus.func != 0)
break;
case IXGBE_SUBDEV_ID_82599_SFP:
+ case IXGBE_SUBDEV_ID_82599_RNDC:
is_wol_supported = 1;
break;
}
@@ -6987,6 +7043,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
int i, err, pci_using_dac;
u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
+ unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7036,7 +7093,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_save_state(pdev);
#ifdef CONFIG_IXGBE_DCB
- indices *= MAX_TRAFFIC_CLASS;
+ if (ii->mac == ixgbe_mac_82598EB)
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_RSS_INDICES);
+ else
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_FDIR_INDICES);
#endif
if (ii->mac == ixgbe_mac_82598EB)
@@ -7048,6 +7110,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
+ indices = max_t(unsigned int, dcb_max, indices);
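
The sizing above computes a DCB estimate capped per MAC family, then takes the larger of the DCB and non-DCB figures before allocating the netdev. A hedged sketch of the arithmetic (the caps are stand-ins, not the driver's real limits):

#include <stdio.h>

#define MAX_TCS		8
#define CAP_82598	16	/* stand-in for the RSS cap */
#define CAP_82599	64	/* stand-in for the FDIR cap */

static unsigned int queue_estimate(unsigned int cpus, int is_82598)
{
	unsigned int indices = cpus;
	unsigned int cap = is_82598 ? CAP_82598 : CAP_82599;
	unsigned int dcb_max = cpus * MAX_TCS < cap ? cpus * MAX_TCS : cap;

	return dcb_max > indices ? dcb_max : indices;
}

int main(void)
{
	printf("%u\n", queue_estimate(8, 0)); /* 8 CPUs, 82599-class: prints 64 */
	return 0;
}
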
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7154,8 +7217,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
- ixgbe_probe_vf(adapter, ii);
+#ifdef CONFIG_PCI_IOV
+ ixgbe_enable_sriov(adapter, ii);
+#endif
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -7191,10 +7256,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
- IXGBE_FLAG_DCB_ENABLED);
-
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
#endif
@@ -7206,11 +7267,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- }
- if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
- netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->vlan_features |= NETIF_F_FSO;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+ adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ netdev->features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC;
+
+ netdev->vlan_features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC |
+ NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
if (pci_using_dac) {
@@ -7249,11 +7314,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- netdev->hw_features &= ~NETIF_F_RXHASH;
- netdev->features &= ~NETIF_F_RXHASH;
- }
-
/* WOL not supported for all devices */
adapter->wol = 0;
hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
@@ -7364,8 +7424,7 @@ err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(hw->hw_addr);
err_ioremap:
@@ -7412,25 +7471,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
/* remove the added san mac */
ixgbe_del_sanmac_netdev(netdev);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (!(ixgbe_check_vf_assignment(adapter)))
- ixgbe_disable_sriov(adapter);
- else
- e_dev_warn("Unloading driver while VFs are assigned "
- "- VFs will not be deallocated\n");
- }
+ ixgbe_disable_sriov(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -7521,11 +7568,11 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
}
/* Find the pci device of the offending VF */
- vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
while (vfdev) {
if (vfdev->devfn == (req_id & 0xFF))
break;
- vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
device_id, vfdev);
}
/*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24117709d6a..71659edf81a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
* 9 SFP_1g_cu_CORE0 - 82599-specific
* 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Verify supported 1G SFP modules */
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
@@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index dcebd128bec..3456d561714 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,6 +26,7 @@
*******************************************************************************/
#include "ixgbe.h"
#include <linux/export.h>
+#include <linux/ptp_classify.h>
/*
* The 82599 and the X540 do not have true 64bit nanosecond scale
@@ -100,9 +101,13 @@
#define NSECS_PER_SEC 1000000000ULL
#endif
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
/**
* ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc - the cyclecounter structure
+ * @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
@@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
/**
* ixgbe_ptp_adjfreq
- * @ptp - the ptp clock structure
- * @ppb - parts per billion adjustment from base
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
@@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/**
* ixgbe_ptp_adjtime
- * @ptp - the ptp clock structure
- * @delta - offset to adjust the cycle counter by
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
@@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
/**
* ixgbe_ptp_gettime
- * @ptp - the ptp clock structure
- * @ts - timespec structure to hold the current time value
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec.
@@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
/**
* ixgbe_ptp_settime
- * @ptp - the ptp clock structure
- * @ts - the timespec containing the new time for the cycle counter
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
@@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_enable
- * @ptp - the ptp clock structure
- * @rq - the requested feature to change
- * @on - whether to enable or disable the feature
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
*
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
@@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_check_pps_event
- * @adapter - the private adapter structure
- * @eicr - the interrupt cause register value
+ * @adapter: the private adapter structure
+ * @eicr: the interrupt cause register value
*
* This function is called by the interrupt routine when checking for
* interrupts. It will check and handle a pps event.
@@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
return;
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_TIMESYNC)
+ if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
- break;
- default:
- break;
+ break;
+ default:
+ break;
+ }
}
}
/**
* ixgbe_ptp_enable_sdp
- * @hw - the hardware private structure
- * @shift - the clock shift for calculating nanoseconds
+ * @hw: the hardware private structure
+ * @shift: the clock shift for calculating nanoseconds
*
* this function enables the clock out feature on the sdp0 for the
* X540 device. It will create a 1 second periodic output that can be
@@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
/**
* ixgbe_ptp_disable_sdp
- * @hw - the private hardware structure
+ * @hw: the private hardware structure
*
* this function disables the auxiliary SDP clock out feature
*/
@@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_match - determine if this skb matches a ptp packet
+ * @skb: pointer to the skb
+ * @hwtstamp: pointer to the hwtstamp_config to check
+ *
+ * Determine whether the skb should have been timestamped, assuming the
+ * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
+ * should have a timestamp waiting in the registers, and 0 otherwise.
+ *
+ * V1 packets have to check the version type to determine whether they are
+ * correct. However, we can't directly access the data because it might be
+ * fragmented in the SKB, in paged memory. In order to work around this, we
+ * use skb_copy_bits which will properly copy the data whether it is in the
+ * paged memory fragments or not. We have to copy the IP header as well as the
+ * message type.
+ */
+static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+{
+ struct iphdr iph;
+ u8 msgtype;
+ unsigned int type, offset;
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE)
+ return 0;
+
+ type = sk_run_filter(skb, ptp_filter);
+
+ if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
+ return type & PTP_CLASS_V2;
+
+ /* For the remaining cases actually check message type */
+ switch (type) {
+ case PTP_CLASS_V1_IPV4:
+ skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
+ offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ offset = OFF_PTP6 + OFF_PTP_CONTROL;
+ break;
+ default:
+ /* other cases invalid or handled above */
+ return 0;
+ }
+
+ /* Make sure our buffer is long enough */
+ if (skb->len < offset)
+ return 0;
+
+ skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
+ default:
+ return 0;
+ }
+}
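
For the V1 IPv4 case above, the control-field offset is plain header arithmetic: the Ethernet header, the IP header length recovered from the IHL field, the UDP header, then the fixed control offset inside the PTP header. A sketch with the usual sizes (the PTP control offset is an assumption here, not read from the kernel headers):

#include <stdio.h>

#define ETH_HLEN	14
#define UDP_HLEN	8
#define OFF_PTP_CONTROL	32	/* assumed control-field offset in the PTP header */

int main(void)
{
	unsigned int ihl = 5;	/* IPv4 header length in 32-bit words, no options */
	unsigned int off = ETH_HLEN + (ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;

	printf("msgtype lives at byte %u\n", off); /* 14 + 20 + 8 + 32 = 74 */
	return 0;
}
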
+
+/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
@@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
/**
* ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
+ * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
@@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
* is passed up the network stack
*/
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
@@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
hw = &adapter->hw;
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+
+ /* Check if we have a valid timestamp and make sure the skb should
+ * have been timestamped */
+ if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
+ !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
+ /*
+ * Always read the registers, in order to clear a possible fault
+ * because of stagnant RX timestamp values for a packet that never
+ * reached the queue.
+ */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
/*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
+ * If the timestamp bit is set in the packet's descriptor, we know the
+ * timestamp belongs to this packet. No other packet can be
+ * timestamped until the registers for timestamping have been read.
+ * Therefore only one packet with this bit can be in the queue at a
+ * time, and the rx timestamp values that were in the registers belong
+ * to this packet.
*
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only use V2
+ * Event mode. This more accurately tells the user what the hardware is going
+ * to do anyway.
*/
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd)
@@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
default:
/*
- * register RXMTRL must be set, therefore it is not
- * possible to time stamp both V1 Sync and Delay_Req messages
- * and hardware does not support timestamping all packets
- * => return error
+ * register RXMTRL must be set in order to time stamp V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages and hardware does not support
+ * timestamping all packets => return error
*/
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
+ /* Store filter value for later use */
+ adapter->rx_hwtstamp_filter = config.rx_filter;
+
/* define ethertype filter for timestamped packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
@@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
*
* this function initializes the timecounter and cyclecounter
* structures for use in generating a ns counter from the arbitrary
@@ -826,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_init
- * @adapter - the ixgbe private adapter structure
+ * @adapter: the ixgbe private adapter structure
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
@@ -870,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
+ /* initialize the ptp filter */
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
+ e_dev_warn("ptp_filter_init failed\n");
+
spin_lock_init(&adapter->tmreg_lock);
ixgbe_ptp_start_cyclecounter(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 2d971d18696..4fea8716ab6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -44,50 +44,15 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- int device_id;
- int vfs_found = 0;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn &&
- (pvfdev->bus->number >= pdev->bus->number))
- vfs_found++;
- vf_devfn += 2;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
- device_id, pvfdev);
- }
-
- return vfs_found;
-}
-
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
int pre_existing_vfs = 0;
- pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
if (!pre_existing_vfs && !adapter->num_vfs)
return;
@@ -106,16 +71,33 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
"enabled for this device - Please reload all "
"VF drivers to avoid spoofed packet errors\n");
} else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function. If the user requests greater than
+ * 63 VFs then it is an error - reset to default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ adapter->num_vfs = 0;
+ return;
+ }
}
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- goto err_novfs;
- }
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ if (!adapter->ring_feature[RING_F_VMDQ].limit)
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
@@ -146,12 +128,39 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
* and memory allocated set up the mailbox parameters
*/
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops,
- sizeof(hw->mbx.ops));
+ memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+
+ /* limit traffic classes based on VFs enabled */
+ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
+ (adapter->num_vfs < 16)) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ } else if (adapter->num_vfs < 32) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ } else {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+ }
+
+ /* We do not support RSS w/ SR-IOV */
+ adapter->ring_feature[RING_F_RSS].limit = 1;
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
+
+#ifdef IXGBE_FCOE
+ /*
+ * When SR-IOV is enabled 82599 cannot support jumbo frames
+ * so we must disable FCoE because we cannot support FCoE MTU.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
+ IXGBE_FLAG_FCOE_CAPABLE);
+#endif
+
+ /* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
return;
@@ -160,31 +169,80 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* Oh oh */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
- pci_disable_sriov(adapter->pdev);
+ ixgbe_disable_sriov(adapter);
+}
-err_novfs:
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->num_vfs = 0;
+static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *vfdev;
+ int dev_id;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ dev_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ dev_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ return false;
+ }
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
+ }
+
+ return false;
}
-#endif /* #ifdef CONFIG_PCI_IOV */
+#endif /* #ifdef CONFIG_PCI_IOV */
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr;
u32 gpie;
u32 vmdctl;
- int i;
+
+ /* set num VFs to 0 to prevent access to vfinfo */
+ adapter->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(adapter->mv_list);
+ adapter->mv_list = NULL;
+
+ /* if SR-IOV is already disabled then there is nothing to do */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
#ifdef CONFIG_PCI_IOV
+ /*
+ * If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (ixgbe_vfs_are_assigned(adapter)) {
+ e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return;
+ }
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
#endif
/* turn off device IOV mode */
- gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr &= ~(IXGBE_GCR_EXT_SRIOV);
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -195,19 +253,14 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
IXGBE_WRITE_FLUSH(hw);
+ /* Disable VMDq flag so device will be set in VM mode */
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
/* take a breather then clean up driver data */
msleep(100);
- /* Release reference to VF devices */
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev)
- pci_dev_put(adapter->vfinfo[i].vfdev);
- }
- kfree(adapter->vfinfo);
- kfree(adapter->mv_list);
- adapter->vfinfo = NULL;
-
- adapter->num_vfs = 0;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
@@ -441,33 +494,16 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
return 0;
}
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
- int i;
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-#endif
- return false;
-}
-
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
- (pdev->devfn & 1);
bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) {
- random_ether_addr(vf_mac_addr);
+ eth_random_addr(vf_mac_addr);
e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
vfn, vf_mac_addr);
/*
@@ -475,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
* for it later.
*/
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
- device_id, pvfdev);
- }
- if (pvfdev)
- adapter->vfinfo[vfn].vfdev = pvfdev;
- else
- e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2ab38d5fda9..1be1d30e4e7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 1d80b1cefa6..16ddf14e8ba 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -37,7 +37,6 @@
#include <linux/netdevice.h>
#include <linux/hwmon.h>
-#ifdef CONFIG_IXGBE_HWMON
/* hwmon callback functions */
static ssize_t ixgbe_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
return sprintf(buf, "%u\n", value);
}
-/*
+/**
* ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
- * @ adapter: pointer to the adapter structure
- * @ offset: offset in the eeprom sensor data table
- * @ type: type of sensor data to display
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
*
* For each file we want in hwmon's sysfs interface we need a device_attribute
* This is included in our hwmon_attr struct that contains the references to
@@ -241,5 +240,4 @@ err:
exit:
return rc;
}
-#endif /* CONFIG_IXGBE_HWMON */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 204848d2448..400f86a3117 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -32,9 +32,6 @@
#include <linux/mdio.h>
#include <linux/netdevice.h>
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
-
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
@@ -57,6 +54,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
@@ -1452,6 +1450,7 @@ enum {
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
@@ -2419,7 +2418,7 @@ typedef u32 ixgbe_physical_layer;
*/
/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
#define IXGBE_B2BT(BT) (BT * 8)
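
The BT2KB change is a rounding fix: dividing bit times by 8 * 1024 = 8192 only rounds up correctly when divisor - 1 = 8191 is added first; the old + 1023 under-rounded by up to a kilobyte. A minimal demonstration of the ceiling-division idiom:

#include <stdio.h>

#define DIV_ROUND_UP_DEMO(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* old form: (8193 + 1023) / 8192 == 1, one KB short */
	printf("%u\n", (8193u + 1023u) / 8192u);	 /* prints 1 */
	printf("%u\n", DIV_ROUND_UP_DEMO(8193u, 8192u)); /* prints 2 */
	return 0;
}
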
/* Calculate Delay to respond to PFC */
@@ -2450,24 +2449,31 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PCI_DELAY 10000
/* Calculate X540 delay value in bit times */
-#define IXGBE_FILL_RATE (36 / 25)
-
-#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + \
- (2 * IXGBE_ID_X540) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
- (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
-#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
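
The rewritten DV macros also dodge an integer-math trap: the removed IXGBE_FILL_RATE was (36 / 25), which evaluates to 1 in C integer arithmetic, so the intended 36/25 scaling silently vanished. Multiplying by 36 first and dividing by 25 last preserves the fraction, as this small demonstration shows:

#include <stdio.h>

int main(void)
{
	unsigned int delay = 10000;

	printf("%u\n", (36 / 25) * delay);	/* ratio collapses to 1: prints 10000 */
	printf("%u\n", 36 * delay / 25);	/* scale first: prints 14400 */
	return 0;
}
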
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@@ -2597,6 +2603,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_1g_cu_core0 = 9,
ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2837,6 +2845,7 @@ struct ixgbe_mac_operations {
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
@@ -2912,6 +2921,7 @@ struct ixgbe_mac_info {
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
+ u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f90ec078ece..de4da5219b7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -156,6 +156,9 @@ mac_reset_top:
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index e09a6cc633b..418af827b23 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -251,6 +251,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
@@ -264,32 +265,9 @@ struct ixgbe_adv_tx_context_desc {
/* Interrupt register bitmasks */
-/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
-
-/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_MAILBOX | \
- IXGBE_EIMS_OTHER)
-
#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
/* Error Codes */
#define IXGBE_ERR_INVALID_MAC_ADDR -1
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e8dddf572d3..8f2070439b5 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -43,7 +43,6 @@
#define IXGBE_ALL_RAR_ENTRIES 16
-#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
- {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -289,13 +284,11 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbevf_ring *tx_ring = adapter->tx_ring;
- struct ixgbevf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring->tx_max_pending = IXGBEVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
@@ -303,33 +296,28 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
- int i, err = 0;
u32 new_rx_count, new_tx_count;
+ int i, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
- /* nothing to do */
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ /* if there is nothing to do, return success */
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
return 0;
- }
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
- /*
- * If the adapter isn't up and running then just set the
- * new parameters and scurry for the exits.
- */
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i].count = new_tx_count;
@@ -340,82 +328,98 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!tx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring) {
- err = -ENOMEM;
- goto err_rx_setup;
- }
-
- ixgbevf_down(adapter);
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
- memcpy(tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (err) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ /* clone ring and setup updated count */
+ tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_tx_resources(adapter,
- &tx_ring[i]);
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
}
- goto err_tx_ring_setup;
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+
+ goto clear_reset;
}
- tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
- memcpy(rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ /* clone ring and setup updated count */
+ rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_rx_resources(adapter,
- &rx_ring[i]);
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
}
- goto err_rx_ring_setup;
+
+ vfree(rx_ring);
+ rx_ring = NULL;
+
+ goto clear_reset;
}
- rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
- /*
- * Only switch to new rings if all the prior allocations
- * and ring setups have succeeded.
- */
- kfree(adapter->tx_ring);
- adapter->tx_ring = tx_ring;
- adapter->tx_ring_count = new_tx_count;
-
- kfree(adapter->rx_ring);
- adapter->rx_ring = rx_ring;
- adapter->rx_ring_count = new_rx_count;
+ /* bring interface down to prepare for update */
+ ixgbevf_down(adapter);
- /* success! */
- ixgbevf_up(adapter);
+ /* Tx */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ adapter->tx_ring[i] = tx_ring[i];
+ }
+ adapter->tx_ring_count = new_tx_count;
- goto clear_reset;
+ vfree(tx_ring);
+ tx_ring = NULL;
+ }
-err_rx_ring_setup:
- for(i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ /* Rx */
+ if (rx_ring) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ adapter->rx_ring[i] = rx_ring[i];
+ }
+ adapter->rx_ring_count = new_rx_count;
-err_tx_ring_setup:
- kfree(rx_ring);
+ vfree(rx_ring);
+ rx_ring = NULL;
+ }
-err_rx_setup:
- kfree(tx_ring);
+ /* restore interface using new values */
+ ixgbevf_up(adapter);
clear_reset:
+ /* free Tx resources if Rx error is encountered */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ vfree(tx_ring);
+ }
+
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
@@ -674,10 +678,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- if (!adapter->dev_closed)
- ixgbevf_reinit_locked(adapter);
- }
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0a1b99240d4..98cadb0c4da 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,12 +52,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- dma_addr_t page_dma;
- unsigned int page_offset;
};
struct ixgbevf_ring {
+ struct ixgbevf_ring *next;
+ struct net_device *netdev;
+ struct device *dev;
struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -83,29 +83,9 @@ struct ixgbevf_ring {
* offset associated with this ring, which is different
* for DCB and RSS modes */
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u64 v_idx; /* maps directly to the index for this ring in the hardware
- * vector array, can also be used for finding the bit in EICR
- * and friends that represents the vector for this ring */
-
- u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
};
-enum ixgbevf_ring_f_enum {
- RING_F_NONE = 0,
- RING_F_ARRAY_SIZE /* must be last in enum set */
-};
-
-struct ixgbevf_ring_feature {
- int indices;
- int mask;
-};
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -120,8 +100,6 @@ struct ixgbevf_ring_feature {
#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
-#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
@@ -140,22 +118,42 @@ struct ixgbevf_ring_feature {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+struct ixgbevf_ring_container {
+ struct ixgbevf_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+/* iterator for handling rings in ring container */
+#define ixgbevf_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
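The container replaces the old bitmap-plus-count scheme with an intrusive singly linked list: the map_vector_to_rxq()/map_vector_to_txq() helpers later in this patch push a ring onto the list head, and the iterator above walks the chain. A self-contained sketch of the same pattern, with simplified stand-in types rather than the driver's:

#include <stdio.h>
#include <stddef.h>

struct ring {
	struct ring *next;
	int idx;
};

struct ring_container {
	struct ring *head;	/* plays the role of ixgbevf_ring_container.ring */
};

#define for_each_ring(pos, c) \
	for ((pos) = (c).head; (pos) != NULL; (pos) = (pos)->next)

int main(void)
{
	struct ring r0 = { NULL, 0 }, r1 = { NULL, 1 };
	struct ring_container rx = { NULL };
	struct ring *pos;

	/* map_vector_to_rxq()-style push onto the list head */
	r0.next = rx.head; rx.head = &r0;
	r1.next = rx.head; rx.head = &r1;

	for_each_ring(pos, rx)
		printf("servicing ring %d\n", pos->idx);
	return 0;
}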
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
- DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
- DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
- u8 rxr_count; /* Rx ring count assigned to this vector */
- u8 txr_count; /* Tx ring count assigned to this vector */
- u8 tx_itr;
- u8 rx_itr;
- u32 eitr;
- int v_idx; /* vector index in list */
+ struct ixgbevf_ring_container rx, tx;
+ char name[IFNAMSIZ + 9];
};
+/*
+ * Microsecond values for various ITR rates shifted left by 2 to fit the ITR
+ * register, whose low 3 bits are reserved as 0
+ */
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
+
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
* supported by all of the ixgbe hardware is 8.
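The "same math going both ways" remark holds because a conversion of the form n -> 10^9/(256*n) is, up to integer rounding, its own inverse: applying the macro to an ints/sec value yields register units, and applying it again recovers the rate. A sketch of that involution, assuming the usual 10^9/256 constant (the macro body itself is not shown in this hunk, so treat the constant as an assumption):

#include <stdio.h>

/* assumed form of the helper; the driver's exact constant may differ */
#define EITR_CONV(x)	((x) ? (1000000000u / ((x) * 256u)) : 8u)

int main(void)
{
	unsigned int ints_per_sec = 20000;	/* 20K interrupts/s */
	unsigned int reg = EITR_CONV(ints_per_sec);	/* -> register units */
	unsigned int back = EITR_CONV(reg);	/* same macro inverts, +/- rounding */

	printf("%u ints/s -> reg %u -> %u ints/s\n", ints_per_sec, reg, back);
	return 0;
}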
@@ -168,12 +166,12 @@ struct ixgbevf_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define IXGBE_RX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
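The IXGBE_DESC_UNUSED() macro above encodes the standard circular-ring convention in which one slot is always left empty so that the full and empty states are distinguishable. A standalone rendering of the same formula:

#include <stdio.h>

/* same free-slot arithmetic as IXGBE_DESC_UNUSED for a circular ring */
static unsigned int desc_unused(unsigned int ntc, unsigned int ntu,
				unsigned int count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	/* 512-entry ring, clean at 10, use at 200: 321 slots still free */
	printf("%u\n", desc_unused(10, 200, 512));
	/* one slot is always sacrificed so that full != empty */
	printf("%u\n", desc_unused(0, 0, 512));	/* 511 */
	return 0;
}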
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
@@ -181,9 +179,8 @@ struct ixgbevf_q_vector {
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_Q_VECTORS 2
-#define MAX_MSIX_COUNT 2
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
@@ -193,12 +190,14 @@ struct ixgbevf_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
/* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ /* interrupt masks */
+ u32 eims_enable_mask;
+ u32 eims_other;
/* TX */
struct ixgbevf_ring *tx_ring; /* One per active queue */
@@ -213,18 +212,13 @@ struct ixgbevf_adapter {
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
- int num_rx_pools; /* == num_rx_queues in 82598 */
- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -232,15 +226,8 @@ struct ixgbevf_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -254,18 +241,16 @@ struct ixgbevf_adapter {
u32 eitr_param;
unsigned long state;
- u32 *config_space;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
- unsigned long link_check_timeout;
struct work_struct watchdog_task;
- bool netdev_registered;
- bool dev_closed;
+
+ spinlock_t mbx_lock;
};
enum ixbgevf_state_t {
@@ -301,11 +286,8 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-
-#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
-#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 41e32257a4e..3f9841d619a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -42,6 +42,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
@@ -97,9 +98,7 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -115,7 +114,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
-/*
+/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -146,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer
*tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -175,27 +174,20 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#ifdef MAX_SKB_FRAGS
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-#else
-#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
-#endif
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
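The DIV_ROUND_UP() form computes the same ceiling as the old shift-and-remainder pair, and DESC_NEEDED now budgets one descriptor per possible fragment plus slack for the context descriptor and linear data. A quick check of the arithmetic, assuming the driver's usual 2^14-byte per-descriptor limit (IXGBE_MAX_TXD_PWR itself is defined outside this hunk):

#include <stdio.h>

#define MAX_TXD_PWR		14	/* assumed value of IXGBE_MAX_TXD_PWR */
#define MAX_DATA_PER_TXD	(1u << MAX_TXD_PWR)	/* 16384 bytes */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(s)	DIV_ROUND_UP((s), MAX_DATA_PER_TXD)

int main(void)
{
	/* a 60000-byte buffer needs ceil(60000 / 16384) = 4 descriptors */
	printf("%u\n", TXD_USE_COUNT(60000u));
	/* exact multiples don't over-allocate: 32768 -> 2 */
	printf("%u\n", TXD_USE_COUNT(32768u));
	return 0;
}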
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
**/
-static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
- struct net_device *netdev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int i, eop, count = 0;
@@ -206,10 +198,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->work_limit)) {
+ (count < tx_ring->count)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
/* eop could change between read and DD-check */
@@ -217,7 +209,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
goto cont_loop;
for ( ; !cleaned; count++) {
struct sk_buff *skb;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
skb = tx_buffer_info->skb;
@@ -234,7 +226,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
total_bytes += bytecount;
}
- ixgbevf_unmap_and_free_tx_resource(adapter,
+ ixgbevf_unmap_and_free_tx_resource(tx_ring,
tx_buffer_info);
tx_desc->wb.status = 0;
@@ -246,37 +238,25 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
cont_loop:
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
-#ifdef HAVE_TX_MQ
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
- !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
- }
-#else
- if (netif_queue_stopped(netdev) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_queue(netdev);
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
++adapter->restart_queue;
}
-#endif
- }
-
- /* re-arm the interrupt */
- if ((count >= tx_ring->work_limit) &&
- (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
}
u64_stats_update_begin(&tx_ring->syncp);
@@ -284,7 +264,7 @@ cont_loop:
tx_ring->total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
- return count < tx_ring->work_limit;
+ return count < tx_ring->count;
}
/**
@@ -304,13 +284,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (is_vlan && test_bit(tag, adapter->active_vlans))
+ if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
- else
- netif_rx(skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -320,12 +297,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
* @skb: skb currently being received and modified
**/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
/* Rx csum disabled */
- if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* if IP and error */
@@ -360,52 +338,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+ unsigned int i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
-
- if (!bi->page_dma &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
skb = bi->skb;
if (!skb) {
- skb = netdev_alloc_skb(adapter->netdev,
- bufsz);
-
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
bi->skb = skb;
}
if (!bi->dma) {
@@ -413,14 +360,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
@@ -431,36 +371,22 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
- u64 qmask)
+ u32 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
- mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
- int *work_done, int work_to_do)
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -469,36 +395,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb;
unsigned int i;
u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
- u32 upper_len = 0;
- if (*work_done >= work_to_do)
+ if (!budget)
break;
- (*work_done)++;
+ budget--;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
- if (len > IXGBEVF_RX_HDR_SIZE)
- len = IXGBEVF_RX_HDR_SIZE;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ len = le16_to_cpu(rx_desc->wb.upper.length);
skb = rx_buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
rx_buffer_info->skb = NULL;
@@ -511,46 +422,19 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb_put(skb, len);
}
- if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
-
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
- get_page(rx_buffer_info->page);
-
- skb->len += upper_len;
- skb->data_len += upper_len;
- skb->truesize += upper_len;
- }
-
i++;
if (i == rx_ring->count)
i = 0;
- next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
prefetch(next_rxd);
cleaned_count++;
next_buffer = &rx_ring->rx_buffer_info[i];
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- } else {
- skb->next = next_buffer->skb;
- skb->next->prev = skb;
- }
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
adapter->non_eop_descs++;
goto next_desc;
}
@@ -561,7 +445,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, staterr, skb);
+ ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -576,7 +460,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
@@ -608,95 +492,74 @@ next_desc:
rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
- return cleaned;
+ return !!budget;
}
/**
- * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * ixgbevf_poll - NAPI polling callback
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
+ * This function will clean one or more rings associated with a
+ * q_vector.
**/
-static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
struct ixgbevf_q_vector *q_vector =
container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0;
- long r_idx;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ struct ixgbevf_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
- }
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ 1 << q_vector->v_idx);
- return work_done;
+ return 0;
}
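Budget handling in the merged poll routine follows the usual NAPI contract: returning the full budget keeps the vector in polling mode, while completing under budget lets it re-arm the interrupt. The fair-split logic reduces to a few lines; a sketch with made-up counts:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int budget = 64;	/* NAPI weight passed to the poll routine */
	int rx_count = 3;	/* rings hanging off this q_vector */
	int per_ring_budget;

	/* mirror ixgbevf_poll(): split fairly but never below 1 */
	if (rx_count > 1)
		per_ring_budget = max(budget / rx_count, 1);
	else
		per_ring_budget = budget;

	printf("budget %d over %d rings -> %d each\n",
	       budget, rx_count, per_ring_budget);
	return 0;
}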
/**
- * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbevf_q_vector *q_vector =
- container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0, i;
- long r_idx;
- u64 enable_mask = 0;
-
- /* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rxr_count ?: 1);
- budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
- enable_mask |= rx_ring->v_idx;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
-#ifndef HAVE_NETDEV_NAPI_LIST
- if (!netif_running(adapter->netdev))
- work_done = 0;
-
-#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, enable_mask);
- }
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
- return work_done;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
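The value written to VTEITR is now kept in register format throughout (microseconds shifted left by 2, per the defines above), so the helper only has to mask it to the valid field and OR in the WDIS bit so the write does not reset the hardware interval counters. Roughly, using the two constants this patch adds to defines.h:

#include <stdio.h>
#include <stdint.h>

#define IXGBE_MAX_EITR		0x00000FF8	/* valid interval field */
#define IXGBE_EITR_CNT_WDIS	0x80000000	/* don't clear timer bits */

int main(void)
{
	uint32_t itr = 200;	/* e.g. IXGBE_20K_ITR, register format */
	uint32_t itr_reg = (itr & IXGBE_MAX_EITR) | IXGBE_EITR_CNT_WDIS;

	/* the driver would now write itr_reg to VTEITR(v_idx) */
	printf("VTEITR value: 0x%08x\n", (unsigned)itr_reg);
	return 0;
}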
-
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -707,56 +570,49 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_q_vector *q_vector;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j, q_vectors, v_idx, r_idx;
- u32 mask;
+ int q_vectors, v_idx;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ adapter->eims_enable_mask = 0;
/*
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbevf_ring *ring;
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
-
- for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 0, j, v_idx);
- r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
- }
- r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
-
- for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 1, j, v_idx);
- r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
}
- /* if this is a tx only vector halve the interrupt rate */
- if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
- else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= 1 << v_idx;
- ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
-
- /* set up to autoclear timer, and the vectors */
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~IXGBE_EIMS_OTHER;
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+ /* setup eims_other and add value to global eims_enable_mask */
+ adapter->eims_other = 1 << v_idx;
+ adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
@@ -768,11 +624,8 @@ enum latency_range {
/**
* ixgbevf_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
*
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
@@ -782,17 +635,17 @@ enum latency_range {
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
u32 timepassed_us;
u64 bytes_perint;
+ u8 itr_setting = ring_container->itr;
if (packets == 0)
- goto update_itr_done;
-
+ return;
/* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s)
@@ -800,134 +653,77 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/eitr;
+ timepassed_us = q_vector->itr >> 2;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
- retval = low_latency;
+ if (bytes_perint > 10)
+ itr_setting = low_latency;
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
- retval = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
- retval = lowest_latency;
+ if (bytes_perint > 20)
+ itr_setting = bulk_latency;
+ else if (bytes_perint <= 10)
+ itr_setting = lowest_latency;
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
- retval = low_latency;
+ if (bytes_perint <= 20)
+ itr_setting = low_latency;
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
}
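The threshold walk above is a small hysteresis state machine over bytes per microsecond: a stream must exceed 10 B/us to leave lowest_latency and 20 B/us to reach bulk_latency, and drops back at the same boundaries. Restated as a standalone function:

#include <stdio.h>

enum latency_range { lowest_latency = 0, low_latency = 1, bulk_latency = 2 };

/* same threshold walk as ixgbevf_update_itr(); 10 and 20 are bytes/usec */
static int next_latency(int cur, unsigned long long bytes_perint)
{
	switch (cur) {
	case lowest_latency:
		if (bytes_perint > 10)
			cur = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			cur = bulk_latency;
		else if (bytes_perint <= 10)
			cur = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			cur = low_latency;
		break;
	}
	return cur;
}

int main(void)
{
	printf("%d\n", next_latency(lowest_latency, 15));	/* -> low_latency */
	printf("%d\n", next_latency(low_latency, 25));	/* -> bulk_latency */
	return 0;
}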
-/**
- * ixgbevf_write_eitr - write VTEITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
- *
- * This function is made to be called by ethtool and by the driver
- * when it needs to update VTEITR registers at runtime. Hardware
- * specific quirks/differences are taken care of here.
- */
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg)
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbe_hw *hw = &adapter->hw;
+ u32 new_itr = q_vector->itr;
+ u8 current_itr;
- itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+ ixgbevf_update_itr(q_vector, &q_vector->tx);
+ ixgbevf_update_itr(q_vector, &q_vector->rx);
- /*
- * set the WDIS bit to not clear the timer bits and cause an
- * immediate assertion of the interrupt
- */
- itr_reg |= IXGBE_EITR_CNT_WDIS;
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
-}
-
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- u32 new_itr;
- u8 current_itr, ret_itr;
- int i, r_idx, v_idx = q_vector->v_idx;
- struct ixgbevf_ring *rx_ring, *tx_ring;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 100000;
+ new_itr = IXGBE_100K_ITR;
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
default:
- new_itr = 8000;
+ new_itr = IXGBE_8K_ITR;
break;
}
- if (new_itr != q_vector->eitr) {
- u32 itr_reg;
-
- /* save the algorithm value here, not the smoothed one */
- q_vector->eitr = new_itr;
+ if (new_itr != q_vector->itr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
- itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
- ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ new_itr = (10 * new_itr * q_vector->itr) /
+ ((9 * new_itr) + q_vector->itr);
+
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ ixgbevf_write_eitr(q_vector);
}
}
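The new smoothing expression looks opaque but is a 90/10 blend done on interrupt rates: since the ITR value is an interval, 1/result = 0.9*(1/old) + 0.1*(1/new), a weighted harmonic mean, which is why the multiply/divide form avoids converting back to ints/sec. Numerically:

#include <stdio.h>

int main(void)
{
	unsigned int old_itr = 200;	/* current register-format value */
	unsigned int new_itr = 500;	/* target from the latency table */

	/* blend used above: ~90% old rate, ~10% new rate, computed on the
	 * interval so the rates (1/itr) are what actually get mixed */
	unsigned int smoothed = (10 * new_itr * old_itr) /
				((9 * new_itr) + old_itr);

	printf("smoothed itr: %u\n", smoothed);	/* 212: between 200 and 500 */
	return 0;
}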
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
- u32 eicr;
u32 msg;
bool got_ack = false;
- eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
-
if (!hw->mbx.ops.check_for_ack(hw))
got_ack = true;
@@ -956,63 +752,24 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
if (got_ack)
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
-{
- struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *tx_ring;
- int i, r_idx;
-
- if (!q_vector->txr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- ixgbevf_clean_tx_irq(adapter, tx_ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
return IRQ_HANDLED;
}
+
/**
- * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rings - single unshared vector Rx/Tx clean (all rings)
* @irq: unused
* @data: pointer to our q_vector struct for this interrupt vector
**/
-static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- int r_idx;
-
- if (!q_vector->rxr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
- /* disable interrupts on this vector only */
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
- napi_schedule(&q_vector->napi);
-
-
- return IRQ_HANDLED;
-}
-static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
-{
- ixgbevf_msix_clean_rx(irq, data);
- ixgbevf_msix_clean_tx(irq, data);
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -1022,9 +779,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(r_idx, q_vector->rxr_idx);
- q_vector->rxr_count++;
- a->rx_ring[r_idx].v_idx = 1 << v_idx;
+ a->rx_ring[r_idx].next = q_vector->rx.ring;
+ q_vector->rx.ring = &a->rx_ring[r_idx];
+ q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -1032,9 +789,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(t_idx, q_vector->txr_idx);
- q_vector->txr_count++;
- a->tx_ring[t_idx].v_idx = 1 << v_idx;
+ a->tx_ring[t_idx].next = q_vector->tx.ring;
+ q_vector->tx.ring = &a->tx_ring[t_idx];
+ q_vector->tx.count++;
}
/**
@@ -1110,37 +867,30 @@ out:
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- irqreturn_t (*handler)(int, void *);
- int i, vector, q_vectors, err;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
int ri = 0, ti = 0;
- /* Decrement for Other and TCP Timer vectors */
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
- ? &ixgbevf_msix_clean_many : \
- (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
- (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
- NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
-
- if (handler == &ixgbevf_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "rx", ri++);
- } else if (handler == &ixgbevf_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "tx", ti++);
- } else if (handler == &ixgbevf_msix_clean_many) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
} else {
/* skip this unused q_vector */
continue;
}
- err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
+ q_vector->name, q_vector);
if (err) {
hw_dbg(&adapter->hw,
"request_irq failed for MSIX interrupt "
@@ -1149,9 +899,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:mbx", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ &ixgbevf_msix_mbx, 0, netdev->name, adapter);
if (err) {
hw_dbg(&adapter->hw,
"request_irq for msix_mbx failed: %d\n", err);
@@ -1161,9 +910,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
return 0;
free_queue_irqs:
- for (i = vector - 1; i >= 0; i--)
- free_irq(adapter->msix_entries[--vector].vector,
- &(adapter->q_vector[i]));
+ while (vector) {
+ vector--;
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -1176,11 +927,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
- q_vector->rxr_count = 0;
- q_vector->txr_count = 0;
- q_vector->eitr = adapter->eitr_param;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
}
}
@@ -1206,17 +956,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
-
i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, netdev);
+ free_irq(adapter->msix_entries[i].vector, adapter);
i--;
for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
+ continue;
+
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
@@ -1230,10 +983,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
**/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
- int i;
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
IXGBE_WRITE_FLUSH(hw);
@@ -1245,23 +1000,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
* ixgbevf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
- bool queues, bool flush)
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 mask;
- u64 qmask;
-
- mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
- qmask = ~0;
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- if (queues)
- ixgbevf_irq_enable_queues(adapter, qmask);
- if (flush)
- IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
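The enable path programs three masks with the same value: VTEIAM so a firing vector is automatically masked, VTEIAC so its cause bit auto-clears, and VTEIMS to unmask everything in use. That combination is what lets ixgbevf_msix_clean_rings() above go straight to napi_schedule() without touching EIMC. A toy model of the resulting flow (register semantics summarized from the code's own comments, not a hardware reference):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 0x3;	/* say: q_vector 0 plus the other/mailbox bit */
	uint32_t eiam = mask;	/* fired vectors get auto-masked */
	uint32_t eiac = mask;	/* their cause bits get auto-cleared */
	uint32_t eims = mask;	/* and everything starts unmasked */

	/* vector 0 fires: with EIAM set, hardware masks it for us, so the
	 * handler needn't write EIMC before scheduling NAPI */
	eims &= ~(1u << 0);

	printf("eims=0x%x (eiam=0x%x eiac=0x%x)\n", eims, eiam, eiac);
	return 0;
}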
/**
@@ -1311,29 +1056,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBEVF_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- } else {
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBEVF_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
@@ -1353,36 +1083,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
u32 rdlen;
int rx_buf_len;
- /* Decide whether to use packet split mode or not */
- if (netdev->mtu > ETH_DATA_LEN) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- } else {
- if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- }
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- /* PSRTYPE must be initialized in 82599 */
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR |
- IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
- rx_buf_len = IXGBEVF_RX_HDR_SIZE;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
- if (netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
- }
+ /* PSRTYPE must be initialized in 82599 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1409,9 +1115,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* add VID to filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
+
+ spin_unlock(&adapter->mbx_lock);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -1422,9 +1133,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -1479,11 +1195,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* reprogram multicast list */
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
+
+ spin_unlock(&adapter->mbx_lock);
}
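Every path above that talks to the PF through the mailbox (set_vfta, update_mc_addr_list, the RAR and MTU messages in up_complete, reset) now takes the same adapter-wide mbx_lock, since a mailbox exchange is a multi-register sequence that must not interleave with another. The shape of the guard, reduced to a userspace sketch with a pthread mutex standing in for the spinlock and a hypothetical stub for the exchange itself:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mbx_lock = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical stand-in for one PF mailbox exchange (set_vfta etc.) */
static void pf_mailbox_exchange(const char *what)
{
	printf("mailbox: %s\n", what);
}

static void guarded_op(const char *what)
{
	pthread_mutex_lock(&mbx_lock);	/* serialize the whole exchange */
	pf_mailbox_exchange(what);
	pthread_mutex_unlock(&mbx_lock);
}

int main(void)
{
	guarded_op("add VID to filter table");
	guarded_op("reprogram multicast list");
	return 0;
}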
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1493,15 +1213,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct napi_struct *napi;
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
- napi = &q_vector->napi;
- if (q_vector->rxr_count > 1)
- napi->poll = &ixgbevf_clean_rxonly_many;
-
- napi_enable(napi);
+ napi_enable(&q_vector->napi);
}
}
@@ -1513,8 +1226,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
napi_disable(&q_vector->napi);
}
}
@@ -1532,9 +1243,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
ixgbevf_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ ixgbevf_alloc_rx_buffers(adapter, ring,
+ IXGBE_DESC_UNUSED(ring));
}
}
@@ -1638,6 +1348,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar) {
if (is_valid_ether_addr(hw->mac.addr))
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
@@ -1649,6 +1361,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mbx.ops.write_posted(hw, msg, 2);
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1658,10 +1372,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);
- /* bring the link up in the watchdog, this could race with our first
- * link up interrupt but shouldn't be a problem */
- adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
}
@@ -1676,7 +1386,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
}
/**
@@ -1714,14 +1424,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
dev_kfree_skb(this);
} while (skb);
}
- if (!rx_buffer_info->page)
- continue;
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- put_page(rx_buffer_info->page);
- rx_buffer_info->page = NULL;
- rx_buffer_info->page_offset = 0;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1758,7 +1460,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -1873,11 +1575,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
+ spin_unlock(&adapter->mbx_lock);
+
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1891,10 +1597,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
{
int err, vector_threshold;
- /* We'll want at least 3 (vector_threshold):
- * 1) TxQ[0] Cleanup
- * 2) RxQ[0] Cleanup
- * 3) Other (Link Status Change, etc.)
+ /* We'll want at least 2 (vector_threshold):
+ * 1) TxQ[0] + RxQ[0] handler
+ * 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
@@ -1933,8 +1638,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
}
-/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -1949,8 +1654,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
}
/**
@@ -1979,12 +1682,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i].dev = &adapter->pdev->dev;
+ adapter->tx_ring[i].netdev = adapter->netdev;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i].count = adapter->rx_ring_count;
adapter->rx_ring[i].queue_index = i;
adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i].dev = &adapter->pdev->dev;
+ adapter->rx_ring[i].netdev = adapter->netdev;
}
return 0;
@@ -2011,10 +1718,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPUs. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ * (roughly) the same number of vectors as there are CPUs.
+ * The default is to use pairs of vectors.
*/
- v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
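
For the vector budget above: with Tx and Rx rings paired on one vector, the request becomes max(rx, tx) queue count, clamped to the number of online CPUs, plus the non-queue vectors. A hypothetical worked example, assuming NON_Q_VECTORS is 1 as in the ixgbevf headers:

    #include <stdio.h>

    #define NON_Q_VECTORS 1 /* mailbox/other vector; value assumed for illustration */

    static int min_int(int a, int b) { return a < b ? a : b; }
    static int max_int(int a, int b) { return a > b ? a : b; }

    int main(void)
    {
            int num_rx_queues = 2, num_tx_queues = 2, online_cpus = 4;
            int v_budget = max_int(num_rx_queues, num_tx_queues);

            v_budget = min_int(v_budget, online_cpus);
            v_budget += NON_Q_VECTORS;

            printf("v_budget = %d\n", v_budget); /* 2 paired vectors + 1 other = 3 */
            return 0;
    }
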
@@ -2045,12 +1754,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
int q_idx, num_q_vectors;
struct ixgbevf_q_vector *q_vector;
- int napi_vectors;
- int (*poll)(struct napi_struct *, int);
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
- poll = &ixgbevf_clean_rxonly;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -2058,10 +1763,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
goto err_out;
q_vector->adapter = adapter;
q_vector->v_idx = q_idx;
- q_vector->eitr = adapter->eitr_param;
- if (q_idx < napi_vectors)
- netif_napi_add(adapter->netdev, &q_vector->napi,
- (*poll), 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbevf_poll, 64);
adapter->q_vector[q_idx] = q_vector;
}
@@ -2207,21 +1910,17 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->netdev->addr_len);
}
- /* Enable dynamic interrupt throttling rates */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
- /* set defaults for eitr in MegaBytes */
- adapter->eitr_low = 10;
- adapter->eitr_high = 20;
+ /* Enable dynamic interrupt throttling rates */
+ adapter->rx_itr_setting = 1;
+ adapter->tx_itr_setting = 1;
/* set default ring sizes */
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
- /* enable rx csum by default */
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -2281,7 +1980,7 @@ static void ixgbevf_watchdog(unsigned long data)
{
struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
struct ixgbe_hw *hw = &adapter->hw;
- u64 eics = 0;
+ u32 eics = 0;
int i;
/*
@@ -2295,11 +1994,11 @@ static void ixgbevf_watchdog(unsigned long data)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
- eics |= (1 << i);
+ if (qv->rx.ring || qv->tx.ring)
+ eics |= 1 << i;
}
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
watchdog_short_circuit:
schedule_work(&adapter->watchdog_task);
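
The loop above ORs one bit per queue vector that actually owns a ring into the VTEICS cause mask, so the watchdog's self-kick only fires interrupts on vectors with work to do. A small standalone model of that mask construction:

    #include <stdio.h>

    int main(void)
    {
            int has_ring[3] = { 1, 0, 1 }; /* vectors that own a Tx or Rx ring */
            unsigned int eics = 0;
            int i;

            for (i = 0; i < 3; i++)
                    if (has_ring[i])
                            eics |= 1u << i;

            printf("eics = 0x%x\n", eics); /* 0x5: kick vectors 0 and 2 only */
            return 0;
    }
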
@@ -2353,8 +2052,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* no LSC interrupt
*/
if (hw->mac.ops.check_link) {
- if ((hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false)) != 0) {
+ s32 need_reset;
+
+ spin_lock(&adapter->mbx_lock);
+
+ need_reset = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
+ if (need_reset) {
adapter->link_up = link_up;
adapter->link_speed = link_speed;
netif_carrier_off(netdev);
@@ -2469,7 +2176,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->work_limit = tx_ring->count;
return 0;
err:
@@ -2673,7 +2379,7 @@ static int ixgbevf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
return 0;
@@ -2715,172 +2421,153 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
-static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl;
- u32 mss_l4len_idx, l4len;
+ u16 i = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- adapter->hw_tso_ctxt++;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
- }
+ context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
- i = tx_ring->next_to_use;
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->protocol == htons(ETH_P_IP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
- /* use index 1 for TSO */
- mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
- return true;
+static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
}
- return false;
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ /* compute header lengths */
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_idx: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
}
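
The vlan_macip_lens word assembled above packs three fields: the IP header length in the low bits, the MAC header length at IXGBE_ADVTXD_MACLEN_SHIFT (9 in the ixgbe register headers), and the VLAN tag in the upper 16 bits. A sketch of that bit packing with illustrative input values:

    #include <stdint.h>
    #include <stdio.h>

    #define ADVTXD_MACLEN_SHIFT 9           /* MAC header length position */
    #define TX_FLAGS_VLAN_MASK  0xffff0000u /* VLAN tag in the upper 16 bits */

    int main(void)
    {
            uint32_t ip_hdr_len = 20;       /* skb_network_header_len(): IPv4, no options */
            uint32_t mac_hdr_len = 14;      /* skb_network_offset(): plain Ethernet */
            uint32_t tx_flags = 0x002a0000; /* VLAN id 42, already shifted up by 16 */
            uint32_t vlan_macip_lens;

            vlan_macip_lens = ip_hdr_len;
            vlan_macip_lens |= mac_hdr_len << ADVTXD_MACLEN_SHIFT;
            vlan_macip_lens |= tx_flags & TX_FLAGS_VLAN_MASK;

            printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens); /* 0x002a1c14 */
            return 0;
    }
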
-static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
- struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |= (tx_flags &
- IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- case __constant_htons(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- pr_warn("partial checksum but "
- "proto=%x!\n", skb->protocol);
- }
- break;
- }
- }
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
- /* use index zero for tx checksum offload */
- context_desc->mss_l4len_idx = 0;
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- adapter->hw_csum_tx_good++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4_hdr = 0;
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
- return true;
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
}
- return false;
+ /* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
- struct pci_dev *pdev = adapter->pdev;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
@@ -2899,12 +2586,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_single(tx_ring->dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2929,12 +2615,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->dma =
- skb_frag_dma_map(&adapter->pdev->dev, frag,
+ skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev,
+ tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2955,15 +2641,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
count--;
@@ -2974,14 +2660,13 @@ dma_error:
if (i < 0)
i += tx_ring->count;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return count;
}
-static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring, int tx_flags,
+static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -2998,28 +2683,31 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+
if (tx_flags & IXGBE_TX_FLAGS_TSO) {
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
-
/* use index 1 context for tso */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
+ }
- } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ /*
+ * The Check Context bit must be set if the Tx switch is enabled, which
+ * it always is when virtual functions are running
+ */
+ olinfo_status |= IXGBE_ADVTXD_CC;
olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
i = tx_ring->next_to_use;
while (count--) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
cpu_to_le32(cmd_type_len | tx_buffer_info->length);
@@ -3031,24 +2719,14 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
-static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -3060,17 +2738,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++adapter->restart_queue;
return 0;
}
-static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
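
The stop-then-recheck sequence above is the standard lost-wakeup guard: stop the queue, force the stop to be visible with a full barrier, then re-read the free count in case the Tx cleanup path made room in between. A userspace sketch of the same ordering using C11 atomics (a model of the idea, not the driver's API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool queue_stopped;
    static atomic_int  free_descs;

    /* Transmit side: returns true if the caller must return NETDEV_TX_BUSY. */
    static bool maybe_stop(int needed)
    {
            if (atomic_load(&free_descs) >= needed)
                    return false;

            atomic_store(&queue_stopped, true);
            /* The seq_cst store plays the role of smp_mb(): the stop is
             * visible before we re-read the count the cleaner may bump. */
            if (atomic_load(&free_descs) < needed)
                    return true;

            /* A reprieve: cleanup freed descriptors between the checks. */
            atomic_store(&queue_stopped, false);
            return false;
    }

    int main(void)
    {
            atomic_store(&free_descs, 1);
            printf("busy: %d\n", maybe_stop(4)); /* 1: not enough room */
            atomic_store(&free_descs, 8);
            printf("busy: %d\n", maybe_stop(4)); /* 0: plenty of room   */
            return 0;
    }
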
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -3081,54 +2758,66 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_flags = 0;
u8 hdr_len = 0;
int r_idx = 0, tso;
- int count = 0;
-
- unsigned int f;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
tx_ring = &adapter->tx_ring[r_idx];
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
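
Working through the descriptor budget above with assumed values (IXGBE_MAX_DATA_PER_TXD is 16384 in ixgbe headers of this era; the frame shape below is made up):

    #include <stdio.h>

    #define MAX_DATA_PER_TXD 16384 /* assumed IXGBE_MAX_DATA_PER_TXD */

    /* Model of TXD_USE_COUNT(): descriptors needed for one buffer. */
    static unsigned int txd_use_count(unsigned int size)
    {
            return (size + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
    }

    int main(void)
    {
            unsigned int headlen = 60 * 1024;       /* skb_headlen() of a big TSO skb */
            unsigned int frags[2] = { 4096, 4096 }; /* two page-sized fragments */
            unsigned int count = txd_use_count(headlen);
            unsigned int f;

            for (f = 0; f < 2; f++)
                    count += txd_use_count(frags[f]);

            /* + 1 context descriptor + 2 descriptor gap = the "count + 3" test */
            printf("stop unless %u descriptors are free\n", count + 3);
            return 0;
    }
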
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- /* four things can cause us to need a context descriptor */
- if (skb_is_gso(skb) ||
- (skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN))
- count++;
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
-
- if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
- return NETDEV_TX_BUSY;
- }
-
first = tx_ring->next_to_use;
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
+ else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
- ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
- ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ ixgbevf_tx_queue(tx_ring, tx_flags,
+ ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
- ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
}
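
Hoisting the wmb() out of the descriptor loop, as done above, leaves exactly one barrier per frame: every descriptor write must be globally visible before the single tail (doorbell) write lets the NIC fetch them. A generic C11 sketch of this publish pattern (a release fence standing in for wmb(), a volatile standing in for the MMIO register):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct desc { uint64_t addr; uint32_t len; };

    static struct desc ring[8];
    static volatile uint32_t tail_reg; /* stands in for the MMIO tail register */

    static void post(unsigned int i, uint64_t addr, uint32_t len)
    {
            ring[i].addr = addr; /* descriptor fields: plain stores, any order */
            ring[i].len  = len;

            /* Release fence plays the role of wmb(): all descriptor
             * writes are visible before the doorbell is rung. */
            atomic_thread_fence(memory_order_release);
            tail_reg = i + 1; /* device may now fetch descriptors up to here */
    }

    int main(void)
    {
            post(0, 0x1000, 64);
            printf("tail = %u\n", tail_reg);
            return 0;
    }
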
@@ -3152,9 +2841,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar)
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ spin_unlock(&adapter->mbx_lock);
+
return 0;
}
@@ -3211,9 +2904,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_free_all_rx_resources(adapter);
}
-#ifdef CONFIG_PM
pci_save_state(pdev);
-#endif
pci_disable_device(pdev);
}
@@ -3256,19 +2947,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev,
- netdev_features_t features)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- if (features & NETIF_F_RXCSUM)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- return 0;
-}
-
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3281,7 +2959,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
- .ndo_set_features = ixgbevf_set_features,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3341,12 +3018,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
pci_set_master(pdev);
-#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
MAX_TX_QUEUES);
-#else
- netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
-#endif
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -3387,10 +3060,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
- adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
if (err)
@@ -3449,8 +3118,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- adapter->netdev_registered = true;
-
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
@@ -3460,8 +3127,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled\n");
-
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
return 0;
@@ -3501,10 +3166,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
- if (adapter->netdev_registered) {
+ if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- adapter->netdev_registered = false;
- }
ixgbevf_reset_interrupt_capability(adapter);
@@ -3521,12 +3184,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/**
+ * ixgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold boot. Implementation
+ * resembles the first half of the ixgbevf_resume routine.
+ */
+static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+
+ ixgbevf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the ixgbevf_resume routine.
+ */
+static void ixgbevf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers ixgbevf_err_handler = {
+ .error_detected = ixgbevf_io_error_detected,
+ .slot_reset = ixgbevf_io_slot_reset,
+ .resume = ixgbevf_io_resume,
+};
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
.shutdown = ixgbevf_shutdown,
+ .err_handler = &ixgbevf_err_handler
};
/**
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 4ea6580d3ae..c911d883c27 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2743,6 +2743,17 @@ jme_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void jme_netpoll(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ jme_intr(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
static int
jme_nway_reset(struct net_device *netdev)
{
@@ -2944,6 +2955,9 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_tx_timeout = jme_tx_timeout,
.ndo_fix_features = jme_fix_features,
.ndo_set_features = jme_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = jme_netpoll,
+#endif
};
static int __devinit
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5dc9cbd5151..003c5bc7189 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -149,7 +149,6 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_unlock_irqrestore(&priv->lock, flags);
skb_put(skb, len);
- skb->dev = ch->netdev;
skb->protocol = eth_type_trans(skb, ch->netdev);
netif_receive_skb(skb);
}
@@ -646,7 +645,7 @@ ltq_etop_init(struct net_device *dev)
memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
if (!is_valid_ether_addr(mac.sa_data)) {
pr_warn("etop: invalid MAC, using random\n");
- random_ether_addr(mac.sa_data);
+ eth_random_addr(mac.sa_data);
random_mac = true;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0f06b2bc28..770ee557924 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1896,7 +1896,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
goto out_free;
}
- rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+ rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
int nexti;
@@ -2001,7 +2001,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_desc_area_size = size;
- tx_desc = (struct tx_desc *)txq->tx_desc_area;
+ tx_desc = txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
struct tx_desc *txd = tx_desc + i;
int nexti;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1db023b075a..59489722e89 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1032,7 +1032,7 @@ static int rxq_init(struct net_device *dev)
}
memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */
- p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
@@ -1095,7 +1095,7 @@ static int txq_init(struct net_device *dev)
}
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
- p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28a54451a3e..2b0748dba8b 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -141,6 +141,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
{ 0 }
};
@@ -3079,8 +3080,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
/* Reading this mask interrupts as side effect */
status = sky2_read32(hw, B0_Y2_SP_ISRC2);
- if (status == 0 || status == ~0)
+ if (status == 0 || status == ~0) {
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
return IRQ_NONE;
+ }
prefetch(&hw->st_le[hw->st_idx]);
@@ -3349,6 +3352,17 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
reg);
+ if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ hw->chip_rev == CHIP_REV_YU_PRM_A0) {
+ /* change PHY Interrupt polarity to low active */
+ reg = sky2_read16(hw, GPHY_CTRL);
+ sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
+
+ /* adapt HW for low active PHY Interrupt */
+ reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
+ sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
+ }
+
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4871,7 +4885,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"UL 2", /* 0xba */
"Unknown", /* 0xbb */
"Optima", /* 0xbc */
- "Optima Prime", /* 0xbd */
+ "OptimaEEE", /* 0xbd */
"Optima 2", /* 0xbe */
};
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 3c896ce80b7..615ac63ea86 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -23,6 +23,7 @@ enum {
PSM_CONFIG_REG3 = 0x164,
PSM_CONFIG_REG4 = 0x168,
+ PCI_LDO_CTRL = 0xbc,
};
/* Yukon-2 */
@@ -586,6 +587,10 @@ enum yukon_supr_rev {
CHIP_REV_YU_SU_B1 = 3,
};
+enum yukon_prm_rev {
+ CHIP_REV_YU_PRM_Z1 = 1,
+ CHIP_REV_YU_PRM_A0 = 2,
+};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 842c8ce9494..7e94987d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = NULL
},
+ /* flow steering commands */
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_DETACH,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 908a460d8db..aa9c2f6cf3c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -77,6 +77,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
char name[25];
+ struct cpu_rmap *rmap =
+#ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap;
+#else
+ NULL;
+#endif
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -91,7 +97,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
sprintf(name, "%s-%d", priv->dev->name,
cq->ring);
/* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+ if (mlx4_assign_eq(mdev->dev, name, rmap,
+ &cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
mlx4_warn(mdev, "Failed Assigning an EQ to "
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 72901ce2b08..9d0b88eea02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -34,10 +34,14 @@
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_en.h"
#include "en_port.h"
+#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
+#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
+#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
@@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
return err;
}
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int mlx4_en_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+ u64 full_mac = ~0ull;
+ u64 zero_mac = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (cmd->fs.m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+ /* don't allow mask which isn't all 0 or 1 */
+ if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
+ !all_zeros_or_all_ones(l4_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l4_mask->psrc) ||
+ !all_zeros_or_all_ones(l4_mask->pdst))
+ return -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+ (!l3_mask->ip4src && !l3_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l3_mask->ip4src) ||
+ !all_zeros_or_all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* source mac mask must not be set */
+ if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
+ return -EINVAL;
+
+ /* dest mac mask must be ff:ff:ff:ff:ff:ff */
+ if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
+ return -EINVAL;
+
+ if (!all_zeros_or_all_ones(eth_mask->h_proto))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ if (cmd->fs.m_ext.vlan_etype ||
+ !(cmd->fs.m_ext.vlan_tci == 0 ||
+ cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
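
The validation above only admits masks that are fully clear (wildcard) or fully set (exact match), because the device's flow steering cannot express partial prefixes. A userland model of the check (UINT32_MAX standing in for the typeof-based kernel macro):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Accept only 0 (don't care) or all-ones (exact match) masks. */
    static bool all_zeros_or_all_ones_u32(uint32_t field)
    {
            return field == 0 || field == UINT32_MAX;
    }

    int main(void)
    {
            uint32_t ip4src_mask = UINT32_MAX; /* exact match: accepted */
            uint32_t ip4dst_mask = 0xffff0000; /* /16 prefix: rejected  */

            printf("src mask ok: %d\n", all_zeros_or_all_ones_u32(ip4src_mask));
            printf("dst mask ok: %d\n", all_zeros_or_all_ones_u32(ip4dst_mask));
            return 0;
    }
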
+
+static int add_ip_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ if (!spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
+ if (l3_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
+ if (l3_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+ list_add_tail(&spec_l3->list, list_h);
+
+ return 0;
+}
+
+static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h, int proto)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct mlx4_spec_list *spec_l4;
+ struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
+ if (!spec_l4 || !spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+
+ if (proto == TCP_V4_FLOW) {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
+ } else {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
+ }
+
+ if (l4_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ if (l4_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+
+ if (l4_mask->psrc)
+ spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
+ if (l4_mask->pdst)
+ spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
+
+ list_add_tail(&spec_l3->list, list_h);
+ list_add_tail(&spec_l4->list, list_h);
+
+ return 0;
+}
+
+static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h)
+{
+ int err;
+ u64 mac;
+ __be64 be_mac;
+ struct ethhdr *eth_spec;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_spec_list *spec_l2;
+ __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ err = mlx4_en_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
+ mac = priv->mac & MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ eth_spec = &cmd->fs.h_u.ether_spec;
+ memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ spec_l2->eth.ether_type = eth_spec->h_proto;
+ if (eth_spec->h_proto)
+ spec_l2->eth.ether_type_enable = 1;
+ break;
+ case IP_USER_FLOW:
+ err = add_ip_rule(priv, cmd, rule_list_h);
+ break;
+ case TCP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
+ break;
+ case UDP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
+ break;
+ }
+
+ return err;
+}
+
+static int mlx4_en_flow_replace(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ethtool_flow_id *loc_rule;
+ struct mlx4_spec_list *spec, *tmp_spec;
+ u32 qpn;
+ u64 reg_id;
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ };
+
+ rule.port = priv->port;
+ rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
+ INIT_LIST_HEAD(&rule.list);
+
+ /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
+ qpn = priv->drop_qp.qpn;
+ else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
+ qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
+ } else {
+ if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
+ if (!qpn) {
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ }
+ rule.qpn = qpn;
+ err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
+ if (err)
+ goto out_free_list;
+
+ loc_rule = &priv->ethtool_rules[cmd->fs.location];
+ if (loc_rule->id) {
+ err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
+ cmd->fs.location, loc_rule->id);
+ goto out_free_list;
+ }
+ loc_rule->id = 0;
+ memset(&loc_rule->flow_spec, 0,
+ sizeof(struct ethtool_rx_flow_spec));
+ }
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
+ if (err) {
+ en_err(priv, "Fail to attach network rule at location %d.\n",
+ cmd->fs.location);
+ goto out_free_list;
+ }
+ loc_rule->id = reg_id;
+ memcpy(&loc_rule->flow_spec, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+out_free_list:
+ list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
+ list_del(&spec->list);
+ kfree(spec);
+ }
+ return err;
+}
+
+static int mlx4_en_flow_detach(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[cmd->fs.location];
+ if (!rule->id) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = mlx4_flow_detach(priv->mdev->dev, rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
+ cmd->fs.location, rule->id);
+ goto out;
+ }
+ rule->id = 0;
+ memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+out:
+ return err;
+}
+
+static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[loc];
+ if (rule->id)
+ memcpy(&cmd->fs, &rule->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ else
+ err = -ENOENT;
+
+ return err;
+}
+
+static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
+{
+ int i, res = 0;
+
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ if (priv->ethtool_rules[i].id)
+ res++;
+ }
+ return res;
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
+ int i = 0, priority = 0;
+
+ if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
+ cmd->cmd == ETHTOOL_GRXCLSRULE ||
+ cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
+ mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_ring_num;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = mlx4_en_get_num_flows(priv);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
+ err = mlx4_en_get_flow(dev, cmd, i);
+ if (!err)
+ rule_locs[priority++] = i;
+ i++;
+ }
+ err = 0;
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
+static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx4_en_flow_replace(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx4_en_flow_detach(dev, cmd);
+ break;
+ default:
+ en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
+ .set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 073b85b45fc..8864d8b5373 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -36,6 +36,8 @@
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/hash.h>
+#include <net/ip.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -66,6 +68,299 @@ static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
+#ifdef CONFIG_RFS_ACCEL
+
+struct mlx4_en_filter {
+ struct list_head next;
+ struct work_struct work;
+
+ __be32 src_ip;
+ __be32 dst_ip;
+ __be16 src_port;
+ __be16 dst_port;
+
+ int rxq_index;
+ struct mlx4_en_priv *priv;
+ u32 flow_id; /* RFS infrastructure id */
+ int id; /* mlx4_en driver id */
+ u64 reg_id; /* Flow steering API id */
+ u8 activated; /* Used to prevent expiry before filter
+ * is attached
+ */
+ struct hlist_node filter_chain;
+};
+
+static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
+
+static void mlx4_en_filter_work(struct work_struct *work)
+{
+ struct mlx4_en_filter *filter = container_of(work,
+ struct mlx4_en_filter,
+ work);
+ struct mlx4_en_priv *priv = filter->priv;
+ struct mlx4_spec_list spec_tcp = {
+ .id = MLX4_NET_TRANS_RULE_ID_TCP,
+ {
+ .tcp_udp = {
+ .dst_port = filter->dst_port,
+ .dst_port_msk = (__force __be16)-1,
+ .src_port = filter->src_port,
+ .src_port_msk = (__force __be16)-1,
+ },
+ },
+ };
+ struct mlx4_spec_list spec_ip = {
+ .id = MLX4_NET_TRANS_RULE_ID_IPV4,
+ {
+ .ipv4 = {
+ .dst_ip = filter->dst_ip,
+ .dst_ip_msk = (__force __be32)-1,
+ .src_ip = filter->src_ip,
+ .src_ip_msk = (__force __be32)-1,
+ },
+ },
+ };
+ struct mlx4_spec_list spec_eth = {
+ .id = MLX4_NET_TRANS_RULE_ID_ETH,
+ };
+ struct mlx4_net_trans_rule rule = {
+ .list = LIST_HEAD_INIT(rule.list),
+ .queue_mode = MLX4_NET_TRANS_Q_LIFO,
+ .exclusive = 1,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .port = priv->port,
+ .priority = MLX4_DOMAIN_RFS,
+ };
+ int rc;
+ __be64 mac;
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ list_add_tail(&spec_eth.list, &rule.list);
+ list_add_tail(&spec_ip.list, &rule.list);
+ list_add_tail(&spec_tcp.list, &rule.list);
+
+ mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
+
+ rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
+ memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ filter->activated = 0;
+
+ if (filter->reg_id) {
+ rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
+ if (rc && rc != -ENOENT)
+ en_err(priv, "Error detaching flow. rc = %d\n", rc);
+ }
+
+ rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
+ if (rc)
+ en_err(priv, "Error attaching flow. err = %d\n", rc);
+
+ mlx4_en_filter_rfs_expire(priv);
+
+ filter->activated = 1;
+}
+
+static inline struct hlist_head *
+filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+ __be16 src_port, __be16 dst_port)
+{
+ unsigned long l;
+ int bucket_idx;
+
+ l = (__force unsigned long)src_port |
+ ((__force unsigned long)dst_port << 2);
+ l ^= (__force unsigned long)(src_ip ^ dst_ip);
+
+ bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
+
+ return &priv->filter_hash[bucket_idx];
+}
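
filter_hash_bucket() above folds the 4-tuple into one word and hashes it; the chain walk still compares every field, so collisions only cost a little time. A self-contained model in the same spirit, assuming a bucket shift of 10 (MLX4_EN_FILTER_HASH_SHIFT's real value lives in mlx4_en.h) and Knuth's 32-bit multiplier in place of the kernel's hash_long():

    #include <stdint.h>
    #include <stdio.h>

    #define FILTER_HASH_SHIFT 10 /* assumed: 1 << 10 buckets */

    /* Multiplicative hash in the spirit of the kernel's hash_long(). */
    static unsigned int hash_bucket(uint32_t src_ip, uint32_t dst_ip,
                                    uint16_t src_port, uint16_t dst_port)
    {
            unsigned long l = (unsigned long)src_port |
                              ((unsigned long)dst_port << 2);

            l ^= (unsigned long)(src_ip ^ dst_ip);

            /* 2654435761 is Knuth's 32-bit golden-ratio multiplier. */
            return (unsigned int)((l * 2654435761u) >> (32 - FILTER_HASH_SHIFT)) &
                   ((1u << FILTER_HASH_SHIFT) - 1);
    }

    int main(void)
    {
            /* 10.0.0.1:1234 -> 10.0.0.2:80 */
            printf("bucket = %u\n",
                   hash_bucket(0x0a000001, 0x0a000002, 1234, 80));
            return 0;
    }
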
+
+static struct mlx4_en_filter *
+mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
+ __be32 dst_ip, __be16 src_port, __be16 dst_port,
+ u32 flow_id)
+{
+ struct mlx4_en_filter *filter = NULL;
+
+ filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
+ if (!filter)
+ return NULL;
+
+ filter->priv = priv;
+ filter->rxq_index = rxq_index;
+ INIT_WORK(&filter->work, mlx4_en_filter_work);
+
+ filter->src_ip = src_ip;
+ filter->dst_ip = dst_ip;
+ filter->src_port = src_port;
+ filter->dst_port = dst_port;
+
+ filter->flow_id = flow_id;
+
+ filter->id = priv->last_filter_id++;
+
+ list_add_tail(&filter->next, &priv->filters);
+ hlist_add_head(&filter->filter_chain,
+ filter_hash_bucket(priv, src_ip, dst_ip, src_port,
+ dst_port));
+
+ return filter;
+}
+
+static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
+{
+ struct mlx4_en_priv *priv = filter->priv;
+ int rc;
+
+ list_del(&filter->next);
+
+ rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
+ if (rc && rc != -ENOENT)
+ en_err(priv, "Error detaching flow. rc = %d\n", rc);
+
+ kfree(filter);
+}
+
+static inline struct mlx4_en_filter *
+mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+ __be16 src_port, __be16 dst_port)
+{
+ struct hlist_node *elem;
+ struct mlx4_en_filter *filter;
+ struct mlx4_en_filter *ret = NULL;
+
+ hlist_for_each_entry(filter, elem,
+ filter_hash_bucket(priv, src_ip, dst_ip,
+ src_port, dst_port),
+ filter_chain) {
+ if (filter->src_ip == src_ip &&
+ filter->dst_ip == dst_ip &&
+ filter->src_port == src_port &&
+ filter->dst_port == dst_port) {
+ ret = filter;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct mlx4_en_priv *priv = netdev_priv(net_dev);
+ struct mlx4_en_filter *filter;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ __be32 src_ip;
+ __be32 dst_ip;
+ __be16 src_port;
+ __be16 dst_port;
+ int nhoff = skb_network_offset(skb);
+ int ret = 0;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ src_ip = ip->saddr;
+ dst_ip = ip->daddr;
+ src_port = ports[0];
+ dst_port = ports[1];
+
+ if (ip->protocol != IPPROTO_TCP)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&priv->filters_lock);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+ if (filter) {
+ if (filter->rxq_index == rxq_index)
+ goto out;
+
+ filter->rxq_index = rxq_index;
+ } else {
+ filter = mlx4_en_filter_alloc(priv, rxq_index,
+ src_ip, dst_ip,
+ src_port, dst_port, flow_id);
+ if (!filter) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ queue_work(priv->mdev->workqueue, &filter->work);
+
+out:
+ ret = filter->id;
+err:
+ spin_unlock_bh(&priv->filters_lock);
+
+ return ret;
+}
+
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring)
+{
+ struct mlx4_en_filter *filter, *tmp;
+ LIST_HEAD(del_list);
+
+ spin_lock_bh(&priv->filters_lock);
+ list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
+ list_move(&filter->next, &del_list);
+ hlist_del(&filter->filter_chain);
+ }
+ spin_unlock_bh(&priv->filters_lock);
+
+ list_for_each_entry_safe(filter, tmp, &del_list, next) {
+ cancel_work_sync(&filter->work);
+ mlx4_en_filter_free(filter);
+ }
+}
+
+static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
+ LIST_HEAD(del_list);
+ int i = 0;
+
+ spin_lock_bh(&priv->filters_lock);
+ list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
+ if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
+ break;
+
+ if (filter->activated &&
+ !work_pending(&filter->work) &&
+ rps_may_expire_flow(priv->dev,
+ filter->rxq_index, filter->flow_id,
+ filter->id)) {
+ list_move(&filter->next, &del_list);
+ hlist_del(&filter->filter_chain);
+ } else
+ last_filter = filter;
+
+ i++;
+ }
+
+ if (last_filter && (&last_filter->next != priv->filters.next))
+ list_move(&priv->filters, &last_filter->next);
+
+ spin_unlock_bh(&priv->filters_lock);
+
+ list_for_each_entry_safe(filter, tmp, &del_list, next)
+ mlx4_en_filter_free(filter);
+}
+#endif
+
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -170,33 +465,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_mc_list *tmp, *mc_to_del;
- kfree(priv->mc_addrs);
- priv->mc_addrs = NULL;
- priv->mc_addrs_cnt = 0;
+ list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
+ list_del(&mc_to_del->list);
+ kfree(mc_to_del);
+ }
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- char *mc_addrs;
- int mc_addrs_cnt = netdev_mc_count(dev);
- int i;
+ struct mlx4_en_mc_list *tmp;
- mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
- if (!mc_addrs) {
- en_err(priv, "failed to allocate multicast list\n");
- return;
- }
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
mlx4_en_clear_list(dev);
- priv->mc_addrs = mc_addrs;
- priv->mc_addrs_cnt = mc_addrs_cnt;
+ netdev_for_each_mc_addr(ha, dev) {
+ tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
+ if (!tmp) {
+ en_err(priv, "failed to allocate multicast list\n");
+ mlx4_en_clear_list(dev);
+ return;
+ }
+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
+ list_add_tail(&tmp->list, &priv->mc_list);
+ }
}
+static void update_mclist_flags(struct mlx4_en_priv *priv,
+ struct list_head *dst,
+ struct list_head *src)
+{
+ struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
+ bool found;
+
+ /* Find all the entries that should be removed from dst;
+ * these are the entries that are not found in src.
+ */
+ list_for_each_entry(dst_tmp, dst, list) {
+ found = false;
+ list_for_each_entry(src_tmp, src, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ dst_tmp->action = MCLIST_REM;
+ }
+
+ /* Add entries that exist in src but not in dst and
+ * mark them as needing to be added.
+ */
+ list_for_each_entry(src_tmp, src, list) {
+ found = false;
+ list_for_each_entry(dst_tmp, dst, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ dst_tmp->action = MCLIST_NONE;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ GFP_KERNEL);
+ if (!new_mc) {
+ en_err(priv, "Failed to allocate current multicast list\n");
+ return;
+ }
+ memcpy(new_mc, src_tmp,
+ sizeof(struct mlx4_en_mc_list));
+ new_mc->action = MCLIST_ADD;
+ list_add_tail(&new_mc->list, dst);
+ }
+ }
+}
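
update_mclist_flags() above is an O(n*m) two-pass set difference: pass one flags dst entries missing from src as MCLIST_REM, pass two appends src entries missing from dst as MCLIST_ADD. The same algorithm over plain integers, with illustrative types and names in place of the MAC-address lists:

    #include <stdbool.h>
    #include <stdio.h>

    enum action { MCLIST_NONE, MCLIST_ADD, MCLIST_REM };

    struct entry { int addr; enum action act; };

    static bool contains(const struct entry *list, int n, int addr)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (list[i].addr == addr)
                            return true;
            return false;
    }

    int main(void)
    {
            struct entry dst[4] = { { 1 }, { 2 }, { 3 } };       /* current state */
            const struct entry src[3] = { { 2 }, { 3 }, { 4 } }; /* wanted state  */
            int ndst = 3, i;

            /* pass 1: dst entries absent from src are marked for removal */
            for (i = 0; i < ndst; i++)
                    dst[i].act = contains(src, 3, dst[i].addr) ?
                                 MCLIST_NONE : MCLIST_REM;

            /* pass 2: src entries absent from dst are appended for addition */
            for (i = 0; i < 3; i++)
                    if (!contains(dst, ndst, src[i].addr)) {
                            dst[ndst].addr = src[i].addr;
                            dst[ndst].act = MCLIST_ADD;
                            ndst++;
                    }

            for (i = 0; i < ndst; i++)
                    printf("addr %d action %d\n", dst[i].addr, dst[i].act);
            return 0;
    }
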
static void mlx4_en_set_multicast(struct net_device *dev)
{
@@ -214,9 +557,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
+ struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
- int err;
+ int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
@@ -251,16 +595,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
- if (!(mdev->dev->caps.flags &
- MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 1);
- else
- err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed enabling "
- "promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ 1);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ break;
+ }
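
For orientation, the switch above (mirrored several times below for the disable and multicast paths) maps one logical operation onto three generations of steering support:

	/* MLX4_STEERING_MODE_DEVICE_MANAGED: firmware flow rules; a single
	 *     MLX4_FS_PROMISC_UPLINK rule covers unicast and multicast
	 *     alike, which is why MLX4_EN_FLAG_MC_PROMISC is set right away.
	 * MLX4_STEERING_MODE_B0: separate per-qpn unicast and multicast
	 *     promisc registrations (mlx4_unicast/multicast_promisc_add()).
	 * MLX4_STEERING_MODE_A0: only the coarse SET_PORT qpn_calc toggle. */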
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -269,15 +643,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
en_err(priv, "Failed disabling "
"multicast filter\n");
- /* Add the default qp number as multicast promisc */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed entering multicast promisc mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
-
/* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
@@ -296,22 +661,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
- if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 0);
- else
- err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
}
/* Enable port VLAN filter */
@@ -329,18 +712,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Add the default qp number as multicast promisc */
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed entering multicast promisc mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
} else {
- int i;
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -351,13 +762,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
- /* Detach our qp from all the multicast addresses */
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
- }
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
@@ -367,13 +771,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- mcast_addr =
- mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, 0, MLX4_PROT_ETH);
+ list_for_each_entry(mclist, &priv->mc_list, list) {
+ mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -381,6 +780,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
+
+ update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ if (mclist->action == MCLIST_REM) {
+ /* detach this address and delete from list */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_detach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ MLX4_PROT_ETH,
+ mclist->reg_id);
+ if (err)
+ en_err(priv, "Failed to detach multicast address\n");
+
+ /* remove from list */
+ list_del(&mclist->list);
+ kfree(mclist);
+ } else if (mclist->action == MCLIST_ADD) {
+ /* attach the address */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ /* needed for B0 steering support */
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_attach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ priv->port, 0,
+ MLX4_PROT_ETH,
+ &mclist->reg_id);
+ if (err)
+ en_err(priv, "Failed to attach multicast address\n");
+
+ }
+ }
}
out:
mutex_unlock(&mdev->state_lock);
@@ -605,6 +1038,9 @@ int mlx4_en_start_port(struct net_device *dev)
return 0;
}
+ INIT_LIST_HEAD(&priv->mc_list);
+ INIT_LIST_HEAD(&priv->curr_list);
+
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
@@ -653,6 +1089,10 @@ int mlx4_en_start_port(struct net_device *dev)
goto mac_err;
}
+ err = mlx4_en_create_drop_qp(priv);
+ if (err)
+ goto rss_err;
+
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
@@ -720,13 +1160,23 @@ int mlx4_en_start_port(struct net_device *dev)
/* Attach rx QP to broadcast address */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- 0, MLX4_PROT_ETH))
+ priv->port, 0, MLX4_PROT_ETH,
+ &priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ }
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
@@ -742,7 +1192,8 @@ tx_err:
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
}
-
+ mlx4_en_destroy_drop_qp(priv);
+rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
@@ -760,6 +1211,7 @@ void mlx4_en_stop_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_mc_list *mclist, *tmp;
int i;
u8 mc_list[16] = {0};
@@ -778,19 +1230,26 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- MLX4_PROT_ETH);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ MLX4_PROT_ETH, priv->broadcast_id);
+ list_for_each_entry(mclist, &priv->curr_list, list) {
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
+ mc_list, MLX4_PROT_ETH, mclist->reg_id);
}
mlx4_en_clear_list(dev);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ list_del(&mclist->list);
+ kfree(mclist);
+ }
+
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+ mlx4_en_destroy_drop_qp(priv);
+
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -915,6 +1374,11 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
+ priv->dev->rx_cpu_rmap = NULL;
+#endif
+
for (i = 0; i < priv->tx_ring_num; i++) {
if (priv->tx_ring[i].tx_info)
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
@@ -970,6 +1434,15 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
goto err;
}
+#ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
+ if (!priv->dev->rx_cpu_rmap)
+ goto err;
+
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
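
The rmap allocated here is the glue that accelerated RFS needs: each RX ring's MSI-X vector is registered into it (see the eq.c hunk further down), and the stack then steers flows through the new ndo_rx_flow_steer hook. A minimal sketch of the alloc/free pairing, assuming a hypothetical wire_up_arfs() helper:

	#include <linux/cpu_rmap.h>

	static int wire_up_arfs(struct net_device *netdev, unsigned int nrings)
	{
		netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(nrings);
		if (!netdev->rx_cpu_rmap)
			return -ENOMEM;
		/* per vector, later: irq_cpu_rmap_add(netdev->rx_cpu_rmap, irq) */
		/* on teardown: free_irq_cpu_rmap(netdev->rx_cpu_rmap) */
		return 0;
	}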
+
return 0;
err:
@@ -1077,6 +1550,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_setup_tc = mlx4_en_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+#endif
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1194,6 +1670,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
NETIF_F_HW_VLAN_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK;
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d49a7ac3187..f32e7030077 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -41,41 +41,75 @@
#include "mlx4_en.h"
-
-static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
- struct mlx4_en_rx_alloc *ring_alloc,
- int i)
+static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct mlx4_en_rx_alloc *frags,
+ struct mlx4_en_rx_alloc *ring_alloc)
{
- struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
- struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+ struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+ struct mlx4_en_frag_info *frag_info;
struct page *page;
dma_addr_t dma;
+ int i;
- if (page_alloc->offset == frag_info->last_offset) {
- /* Allocate new page */
- page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
- if (!page)
- return -ENOMEM;
-
- skb_frags[i].page = page_alloc->page;
- skb_frags[i].offset = page_alloc->offset;
- page_alloc->page = page;
- page_alloc->offset = frag_info->frag_align;
- } else {
- page = page_alloc->page;
- get_page(page);
+ for (i = 0; i < priv->num_frags; i++) {
+ frag_info = &priv->frag_info[i];
+ if (ring_alloc[i].offset == frag_info->last_offset) {
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MLX4_EN_ALLOC_ORDER);
+ if (!page)
+ goto out;
+ dma = dma_map_page(priv->ddev, page, 0,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(priv->ddev, dma)) {
+ put_page(page);
+ goto out;
+ }
+ page_alloc[i].page = page;
+ page_alloc[i].dma = dma;
+ page_alloc[i].offset = frag_info->frag_align;
+ } else {
+ page_alloc[i].page = ring_alloc[i].page;
+ get_page(ring_alloc[i].page);
+ page_alloc[i].dma = ring_alloc[i].dma;
+ page_alloc[i].offset = ring_alloc[i].offset +
+ frag_info->frag_stride;
+ }
+ }
- skb_frags[i].page = page;
- skb_frags[i].offset = page_alloc->offset;
- page_alloc->offset += frag_info->frag_stride;
+ for (i = 0; i < priv->num_frags; i++) {
+ frags[i] = ring_alloc[i];
+ dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ ring_alloc[i] = page_alloc[i];
+ rx_desc->data[i].addr = cpu_to_be64(dma);
}
- dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
- skb_frags[i].offset, frag_info->frag_size,
- PCI_DMA_FROMDEVICE);
- rx_desc->data[i].addr = cpu_to_be64(dma);
+
return 0;
+
+out:
+ while (i--) {
+ frag_info = &priv->frag_info[i];
+ if (ring_alloc[i].offset == frag_info->last_offset)
+ dma_unmap_page(priv->ddev, page_alloc[i].dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ put_page(page_alloc[i].page);
+ }
+ return -ENOMEM;
+}
+
+static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frags,
+ int i)
+{
+ struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+
+ if (frags[i].offset == frag_info->last_offset) {
+ dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+ if (frags[i].page)
+ put_page(frags[i].page);
}
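
The reworked allocator above changes the DMA discipline, which is easy to miss in the diff; in words:

	/* Scheme introduced above:
	 *   - a page of MLX4_EN_ALLOC_SIZE is dma_map_page()'d exactly once;
	 *   - fragments are carved out by advancing ring_alloc[i].offset by
	 *     frag_stride, each live slice holding a page ref (get_page);
	 *   - hitting last_offset means the slice is the page's final one,
	 *     so the next request allocates and maps a fresh page;
	 *   - mlx4_en_free_frag() unmaps only when it frees the slice that
	 *     owns last_offset, and put_page() drops the per-slice ref. */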
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
@@ -91,6 +125,13 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
if (!page_alloc->page)
goto out;
+ page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ goto out;
+ }
page_alloc->offset = priv->frag_info[i].frag_align;
en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
i, page_alloc->page);
@@ -100,6 +141,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
out:
while (i--) {
page_alloc = &ring->page_alloc[i];
+ dma_unmap_page(priv->ddev, page_alloc->dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
put_page(page_alloc->page);
page_alloc->page = NULL;
}
@@ -117,24 +160,22 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
i, page_count(page_alloc->page));
+ dma_unmap_page(priv->ddev, page_alloc->dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
put_page(page_alloc->page);
page_alloc->page = NULL;
}
}
-
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
- struct skb_frag_struct *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
int possible_frags;
int i;
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
- skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -151,29 +192,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
}
}
-
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
- struct page_frag *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
- int i;
+ struct mlx4_en_rx_alloc *frags = ring->rx_info +
+ (index << priv->log_rx_info);
- for (i = 0; i < priv->num_frags; i++)
- if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
- goto err;
-
- return 0;
-
-err:
- while (i--) {
- dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
- pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[i].page);
- }
- return -ENOMEM;
+ return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -185,20 +211,13 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
- struct page_frag *skb_frags;
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
- dma_addr_t dma;
+ struct mlx4_en_rx_alloc *frags;
int nr;
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- dma = be64_to_cpu(rx_desc->data[nr].addr);
-
- en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
- dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[nr].page);
+ mlx4_en_free_frag(priv, frags, nr);
}
}
@@ -268,10 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = -ENOMEM;
int tmp;
-
ring->prod = 0;
ring->cons = 0;
ring->size = size;
@@ -281,7 +299,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
- sizeof(struct skb_frag_struct));
+ sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vmalloc(tmp);
if (!ring->rx_info)
return -ENOMEM;
@@ -338,7 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
memset(ring->buf, 0, ring->buf_size);
mlx4_en_update_rx_prod_db(ring);
- /* Initailize all descriptors */
+ /* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
@@ -389,6 +407,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv, ring);
+#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -401,12 +422,10 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
}
-/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
+ struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
- struct mlx4_en_rx_alloc *page_alloc,
int length)
{
struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
@@ -414,26 +433,24 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
int nr;
dma_addr_t dma;
- /* Collect used fragments while replacing them in the HW descirptors */
+ /* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0; nr < priv->num_frags; nr++) {
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
+ if (!frags[nr].page)
+ goto fail;
- /* Save page reference in skb */
- __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
- skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
- skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
- skb->truesize += frag_info->frag_stride;
dma = be64_to_cpu(rx_desc->data[nr].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
+ DMA_FROM_DEVICE);
- /* Allocate a replacement page */
- if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
- goto fail;
-
- /* Unmap buffer */
- dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
- PCI_DMA_FROMDEVICE);
+ /* Save page reference in skb */
+ get_page(frags[nr].page);
+ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
+ skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
+ skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb->truesize += frag_info->frag_stride;
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
@@ -442,8 +459,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
return nr;
fail:
- /* Drop all accumulated fragments (which have already been replaced in
- * the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
__skb_frag_unref(&skb_frags_rx[nr]);
@@ -454,8 +469,7 @@ fail:
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
- struct mlx4_en_rx_alloc *page_alloc,
+ struct mlx4_en_rx_alloc *frags,
unsigned int length)
{
struct sk_buff *skb;
@@ -473,23 +487,20 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(skb_frags[0].page) + skb_frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
- * synch buffers for the copy */
+ * sync buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
- dma_sync_single_for_device(priv->ddev, dma, length,
- DMA_FROM_DEVICE);
skb->tail += length;
} else {
-
/* Move relevant fragments to skb */
- used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
- skb, page_alloc, length);
+ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
+ skb, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
@@ -526,12 +537,25 @@ out_loopback:
dev_kfree_skb_any(skb);
}
+static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ int index = ring->prod & ring->size_mask;
+
+ while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+ if (mlx4_en_prepare_rx_desc(priv, ring, index))
+ break;
+ ring->prod++;
+ index = ring->prod & ring->size_mask;
+ }
+}
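
This helper replaces the old ring->prod += polled done after CQ polling: completed fragments are now always released (see the next: label in mlx4_en_process_rx_cq() below) and refill is a separate step that tolerates allocation failure, leaving the ring temporarily short instead of recycling a stale descriptor. The occupancy test relies on free-running counters:

	/* ring->prod and ring->cons are free-running u32 counters, so
	 * (u32)(prod - cons) counts outstanding descriptors even across
	 * wraparound, e.g. prod = 0x00000002, cons = 0xfffffffe gives 4.
	 * Only the slot index is masked: index = prod & size_mask. */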
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- struct page_frag *skb_frags;
+ struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -540,6 +564,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int ip_summed;
struct ethhdr *ethh;
+ dma_addr_t dma;
u64 s_mac;
if (!priv->port_up)
@@ -555,7 +580,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ frags = ring->rx_info + (index << priv->log_rx_info);
rx_desc = ring->buf + (index << ring->log_stride);
/*
@@ -579,8 +604,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't skb yet and
* cast it to ethhdr struct */
- ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
- skb_frags[0].offset);
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
s_mac = mlx4_en_mac_to_u64(ethh->h_source);
/* If source MAC is equal to our own MAC and not performing
@@ -612,10 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (!gro_skb)
goto next;
- nr = mlx4_en_complete_rx_desc(
- priv, rx_desc,
- skb_frags, gro_skb,
- ring->page_alloc, length);
+ nr = mlx4_en_complete_rx_desc(priv,
+ rx_desc, frags, gro_skb,
+ length);
if (!nr)
goto next;
@@ -651,8 +678,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring->csum_none++;
}
- skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
- ring->page_alloc, length);
+ skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
priv->stats.rx_dropped++;
goto next;
@@ -678,6 +704,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
netif_receive_skb(skb);
next:
+ for (nr = 0; nr < priv->num_frags; nr++)
+ mlx4_en_free_frag(priv, frags, nr);
+
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = &cq->buf[index];
@@ -693,7 +722,7 @@ out:
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
- ring->prod += polled; /* Polled descriptors were realocated in place */
+ mlx4_en_refill_rx_buffers(priv, ring);
mlx4_en_update_rx_prod_db(ring);
return polled;
}
@@ -782,7 +811,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->num_frags = i;
priv->rx_skb_size = eff_mtu;
- priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+ priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
"num_frags:%d):\n", eff_mtu, priv->num_frags);
@@ -844,6 +873,36 @@ out:
return err;
}
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
+{
+ int err;
+ u32 qpn;
+
+ err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
+ if (err) {
+ en_err(priv, "Failed reserving drop qpn\n");
+ return err;
+ }
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
+ if (err) {
+ en_err(priv, "Failed allocating drop qp\n");
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
+{
+ u32 qpn;
+
+ qpn = priv->drop_qp.qpn;
+ mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+}
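
A note on intent that the code does not state:

	/* priv->drop_qp is never attached to the RSS map; it only reserves
	 * a valid qpn that device-managed steering rules can name as a
	 * sink (e.g. an ethtool flow spec whose action is "discard"), so
	 * matching packets are dropped by the HCA itself. */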
+
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
@@ -954,8 +1013,3 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}
-
-
-
-
-
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index bce98d9c003..cd48337cbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -39,6 +39,7 @@
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
+#include <linux/cpu_rmap.h>
#include "mlx4.h"
#include "fw.h"
@@ -1060,7 +1061,8 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
+ int *vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1074,6 +1076,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
snprintf(priv->eq_table.irq_names +
vec * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE, "%s", name);
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap) {
+ err = irq_cpu_rmap_add(rmap,
+ priv->eq_table.eq[vec].irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
err = request_irq(priv->eq_table.eq[vec].irq,
mlx4_msi_x_interrupt, 0,
&priv->eq_table.irq_names[vec<<5],
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9c83bb8151e..1d70657058a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
static const char * const fname[] = {
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
- [2] = "RSS XOR Hash Function support"
+ [2] = "RSS XOR Hash Function support",
+ [3] = "Device managed flow steering support"
};
int i;
@@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
+#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
+ dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
+ dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -1061,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
+#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
+#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
+#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
+#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
+#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
+#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1119,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
-
- MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
- MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
- MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
+ cpu_to_be32(1 <<
+ INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
+
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ /* Enable Ethernet flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_ETH_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
+ /* Enable IPoIB flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_IB_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+ } else {
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_hash_sz,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
+ MLX4_PUT(inbox, (u8) (1 << 3),
+ INIT_HCA_UC_STEERING_OFFSET);
+ }
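
In device-managed mode the same mc_base/log_mc_* parameters are simply re-targeted at the flow-steering table offsets. For readers new to the mailbox macros, MLX4_PUT (defined in fw.c) stores a scalar into the command mailbox at a byte offset with endian conversion; a rough 64-bit-only sketch:

	#define MY_PUT64(buf, val, off) \
		(*(__be64 *)((u8 *)(buf) + (off)) = cpu_to_be64(val))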
/* TPT attributes */
@@ -1188,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
- MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ } else {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ }
/* TPT attributes */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399e4b7..83fcbbf1b16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
u16 wavelength[MLX4_MAX_PORTS + 1];
u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
+ int fs_log_max_ucast_qp_range_size;
+ int fs_max_num_qp_per_entry;
u64 flags;
u64 flags2;
int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
+ u8 fs_hash_enable_bits;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a0313de122d..42645166bae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
" 10 gives 248.range: 9<="
- " log_num_mgm_entry_size <= 12");
+ " log_num_mgm_entry_size <= 12."
+ " Not used with device-managed"
+ " flow steering");
#define MLX4_VF (1 << 0)
@@ -243,7 +246,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -274,6 +276,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ } else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+
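
The selection above reduces to a small precedence table:

	/* FS_EN | VEP_UC_STEER | VEP_MC_STEER -> steering mode
	 *   1   |      x       |      x         DEVICE_MANAGED
	 *   0   |      1       |      1         B0
	 *   0   |    otherwise                  A0 (warn if exactly one
	 *                                       flag is set, since B0
	 *                                       needs both)             */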
/* Sense port always allowed on supported devices for ConnectX1 and 2 */
if (dev->pdev->device != 0x1003)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -967,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
/*
- * It's not strictly required, but for simplicity just map the
- * whole multicast group table now. The table isn't very big
- * and it's a lot easier than trying to track ref counts.
+ * For flow steering device managed mode it is required to use
+ * mlx4_init_icm_table. For B0 steering mode it's not strictly
+ * required, but for simplicity just map the whole multicast
+ * group table now. The table isn't very big and it's a lot
+ * easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
init_hca->mc_base,
@@ -1205,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ priv->fs_hash_mode = MLX4_FS_L2_HASH;
+
+ switch (priv->fs_hash_mode) {
+ case MLX4_FS_L2_HASH:
+ init_hca.fs_hash_enable_bits = 0;
+ break;
+
+ case MLX4_FS_L2_L3_L4_HASH:
+ /* Enable flow steering with
+ * udp unicast and tcp unicast
+ */
+ init_hca.fs_hash_enable_bits =
+ MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
+ break;
+ }
+
profile = default_profile;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ profile.num_mcg = MLX4_FS_NUM_MCG;
icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
&init_hca);
@@ -1539,8 +1584,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
- + MSIX_LEGACY_SZ, MAX_MSIX);
+ min_t(int, netif_get_num_default_rss_queues() + 1,
+ MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
int err;
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f4a8f98e402..4ec3835e1bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,7 +54,12 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
+ else
+ return min((1 << mlx4_log_num_mgm_entry_size),
+ MLX4_MAX_MGM_ENTRY_SIZE);
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -62,6 +67,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
+static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ u32 size,
+ u64 *reg_id)
+{
+ u64 imm;
+ int err = 0;
+
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ *reg_id = imm;
+
+ return err;
+}
+
+static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
+{
+ int err = 0;
+
+ err = mlx4_cmd(dev, regid, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ return err;
+}
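
Both wrappers are deliberately thin. ATTACH is an immediate-data command: the input is the DMA address of the mailbox holding the packed rule, the size argument is in 32-bit words (hence size >> 2 at the call site in mlx4_flow_attach() below), and the 64-bit immediate output becomes the registration id. DETACH sends that id back as the input parameter with no mailbox at all. The resulting lifecycle, sketched:

	u64 reg_id;

	err = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, &reg_id);
	/* ... the rule is live; reg_id is the only handle to it ... */
	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);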
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
@@ -614,6 +648,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
}
+struct mlx4_net_trans_rule_hw_ctrl {
+ __be32 ctrl;
+ __be32 vf_vep_port;
+ __be32 qpn;
+ __be32 reserved;
+};
+
+static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
+ struct mlx4_net_trans_rule_hw_ctrl *hw)
+{
+ static const u8 __promisc_mode[] = {
+ [MLX4_FS_PROMISC_NONE] = 0x0,
+ [MLX4_FS_PROMISC_UPLINK] = 0x1,
+ [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
+ [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
+ };
+
+ u32 dw = 0;
+
+ dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+ dw |= ctrl->exclusive ? (1 << 2) : 0;
+ dw |= ctrl->allow_loopback ? (1 << 3) : 0;
+ dw |= __promisc_mode[ctrl->promisc_mode] << 8;
+ dw |= ctrl->priority << 16;
+
+ hw->ctrl = cpu_to_be32(dw);
+ hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->qpn = cpu_to_be32(ctrl->qpn);
+}
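
The packed control word built above, for reference:

	/* bit 0      queue_mode (1 = MLX4_NET_TRANS_Q_LIFO)
	 * bit 2      exclusive
	 * bit 3      allow_loopback
	 * bits 8-9   promisc mode, per __promisc_mode[]
	 * bits 16..  priority
	 * The assembled word is byte-swapped once with cpu_to_be32(). */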
+
+struct mlx4_net_trans_rule_hw_ib {
+ u8 size;
+ u8 rsvd1;
+ __be16 id;
+ u32 rsvd2;
+ __be32 qpn;
+ __be32 qpn_mask;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ u8 rsvd1[6];
+ u8 dst_mac[6];
+ u16 rsvd2;
+ u8 dst_mac_msk[6];
+ u16 rsvd3;
+ u8 src_mac[6];
+ u16 rsvd4;
+ u8 src_mac_msk[6];
+ u8 rsvd5;
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_id_msk;
+ __be16 vlan_id;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be16 rsvd1[3];
+ __be16 dst_port;
+ __be16 rsvd2;
+ __be16 dst_port_msk;
+ __be16 rsvd3;
+ __be16 src_port;
+ __be16 rsvd4;
+ __be16 src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+ union {
+ struct {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ };
+ struct mlx4_net_trans_rule_hw_eth eth;
+ struct mlx4_net_trans_rule_hw_ib ib;
+ struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+ struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ };
+};
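
The anonymous struct at the head of the union deliberately mirrors the first four bytes (size, rsvd, id) common to every rule type, so generic code can size and dispatch a buffer without knowing which variant it holds; a fragment:

	struct _rule_hw *h = buf;		/* sketch */
	u32 dwords = h->size;			/* size is kept in 32-bit words */
	u16 type = be16_to_cpu(h->id);		/* selects the union member */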
+
+static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
+ struct _rule_hw *rule_hw)
+{
+ static const u16 __sw_id_hw[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
+ [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
+ [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ };
+
+ static const size_t __rule_hw_sz[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] =
+ sizeof(struct mlx4_net_trans_rule_hw_eth),
+ [MLX4_NET_TRANS_RULE_ID_IB] =
+ sizeof(struct mlx4_net_trans_rule_hw_ib),
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] =
+ sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+ [MLX4_NET_TRANS_RULE_ID_TCP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_UDP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ };
+ if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
+ mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+ return -EINVAL;
+ }
+ memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+ rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
+ rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+
+ switch (spec->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
+ ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
+ ETH_ALEN);
+ if (spec->eth.ether_type_enable) {
+ rule_hw->eth.ether_type_enable = 1;
+ rule_hw->eth.ether_type = spec->eth.ether_type;
+ }
+ rule_hw->eth.vlan_id = spec->eth.vlan_id;
+ rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ rule_hw->ib.qpn = spec->ib.r_qpn;
+ rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
+ memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
+ memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ return -EOPNOTSUPP;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
+ rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
+ rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
+ rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
+ rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
+ rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
+ rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __rule_hw_sz[spec->id];
+}
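
Size bookkeeping worth keeping straight when reading this function and its caller:

	/* __rule_hw_sz[id]   bytes; the value returned to the caller
	 * rule_hw->size      the same size in 32-bit words (>> 2)
	 * ATTACH size arg    total accumulated bytes, converted to words
	 *                    once in mlx4_flow_attach() via size >> 2   */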
+
+static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
+ struct mlx4_net_trans_rule *rule)
+{
+#define BUF_SIZE 256
+ struct mlx4_spec_list *cur;
+ char buf[BUF_SIZE];
+ int len = 0;
+
+ mlx4_err(dev, "%s", str);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "port = %d prio = 0x%x qp = 0x%x ",
+ rule->port, rule->priority, rule->qpn);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ switch (cur->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dmac = %pM ", &cur->eth.dst_mac);
+ if (cur->eth.ether_type)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "ethertype = 0x%x ",
+ be16_to_cpu(cur->eth.ether_type));
+ if (cur->eth.vlan_id)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "vlan-id = %d ",
+ be16_to_cpu(cur->eth.vlan_id));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ if (cur->ipv4.src_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-ip = %pI4 ",
+ &cur->ipv4.src_ip);
+ if (cur->ipv4.dst_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-ip = %pI4 ",
+ &cur->ipv4.dst_ip);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ if (cur->tcp_udp.src_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-port = %d ",
+ be16_to_cpu(cur->tcp_udp.src_port));
+ if (cur->tcp_udp.dst_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-port = %d ",
+ be16_to_cpu(cur->tcp_udp.dst_port));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid = %pI6\n", cur->ib.dst_gid);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid-mask = %pI6\n",
+ cur->ib.dst_gid_msk);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ break;
+
+ default:
+ break;
+ }
+ }
+ len += snprintf(buf + len, BUF_SIZE - len, "\n");
+ mlx4_err(dev, "%s", buf);
+
+ if (len >= BUF_SIZE)
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+}
+
+int mlx4_flow_attach(struct mlx4_dev *dev,
+ struct mlx4_net_trans_rule *rule, u64 *reg_id)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_spec_list *cur;
+ u32 size = 0;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
+ trans_rule_ctrl_to_hw(rule, mailbox->buf);
+
+ size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ ret = parse_trans_rule(dev, cur, mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ size += ret;
+ }
+
+ ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
+ if (ret == -ENOMEM)
+ mlx4_err_rule(dev,
+ "mcg table is full. Failed to register network rule.\n",
+ rule);
+ else if (ret)
+ mlx4_err_rule(dev, "Failed to register network rule.\n", rule);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_attach);
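
A hedged end-to-end sketch of the new API, tying rule, spec list and registration id together. The helper name and the dst-port value are placeholders; field names follow this patch. It steers TCP traffic with destination port 80 into a caller-chosen qpn:

	static int steer_http_to_qpn(struct mlx4_dev *dev, u8 port, u32 qpn,
				     u64 *reg_id)
	{
		struct mlx4_spec_list spec = {
			.id = MLX4_NET_TRANS_RULE_ID_TCP,
			.tcp_udp = {
				.dst_port	= cpu_to_be16(80),
				.dst_port_msk	= cpu_to_be16(0xffff),
			},
		};
		struct mlx4_net_trans_rule rule = {
			.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
			.exclusive	= 0,
			.allow_loopback	= 1,
			.promisc_mode	= MLX4_FS_PROMISC_NONE,
			.port		= port,
			.priority	= MLX4_DOMAIN_NIC,
			.qpn		= qpn,
		};

		INIT_LIST_HEAD(&rule.list);
		list_add_tail(&spec.list, &rule.list);
		return mlx4_flow_attach(dev, &rule, reg_id);
	}

The returned reg_id is the only handle to the installed rule and is what mlx4_flow_detach() below consumes.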
+
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
+{
+ int err;
+
+ err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
+ if (err)
+ mlx4_err(dev, "Failed to detach network rule, registration id = 0x%llx\n",
+ reg_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -866,49 +1205,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback, enum mlx4_protocol prot)
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
+
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.allow_loopback = !block_mcast_loopback;
+ rule.port = port;
+ rule.qpn = qp->qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ switch (prot) {
+ case MLX4_PROT_ETH:
+ spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
+ memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ break;
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 1,
- block_mcast_loopback, prot);
+ case MLX4_PROT_IB_IPV6:
+ spec.id = MLX4_NET_TRANS_RULE_ID_IB;
+ memcpy(spec.ib.dst_gid, gid, 16);
+ memset(&spec.ib.dst_gid_msk, 0xff, 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ list_add_tail(&spec.list, &rule.list);
- return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
- prot, MLX4_MC_STEER);
+ return mlx4_flow_attach(dev, &rule, reg_id);
+ }
+
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
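
One subtlety in the switch above: the MLX4_STEERING_MODE_A0 case has no break. For MLX4_PROT_ETH it returns 0, since A0 has no per-address Ethernet steering to do; for any other protocol it deliberately falls through to the B0 path, which is also the correct IB handling under A0. Annotated:

	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* intentional fall-through for non-Ethernet protocols */
	case MLX4_STEERING_MODE_B0:
		...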
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot)
+ enum mlx4_protocol prot, u64 reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_flow_detach(dev, reg_id);
- return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
+ u32 qpn, enum mlx4_net_trans_promisc_mode mode)
+{
+ struct mlx4_net_trans_rule rule;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p != 0)
+ return -1;
+
+ rule.promisc_mode = mode;
+ rule.port = port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+ mlx4_err(dev, "going promisc on %x\n", port);
+
+ return mlx4_flow_attach(dev, &rule, regid_p);
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
+
+int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
+ enum mlx4_net_trans_promisc_mode mode)
+{
+ int ret;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p == 0)
+ return -1;
+
+ ret = mlx4_flow_detach(dev, *regid_p);
+ if (ret == 0)
+ *regid_p = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
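
The regid arrays give every port exactly one uplink-promisc slot and one allmulti slot:

	/* dev->regid_promisc_array[port]  != 0  => UPLINK rule installed
	 * dev->regid_allmulti_array[port] != 0  => ALL_MULTI rule installed
	 * add fails on an occupied slot and remove fails on an empty one,
	 * so the pair is idempotence-guarded, not reference-counted. */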
+
int mlx4_unicast_attach(struct mlx4_dev *dev,
struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -924,10 +1373,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -968,9 +1413,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
@@ -980,9 +1422,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
@@ -992,9 +1431,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
@@ -1004,9 +1440,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
@@ -1019,6 +1452,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ /* No need for the mcg_table bitmap when firmware manages the mcg table */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 0;
err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
dev->caps.num_amgms - 1, 0, 0);
if (err)
@@ -1031,5 +1468,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
- mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e5d20220762..d2c436b10fb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
@@ -53,6 +54,17 @@
#define DRV_VERSION "1.1"
#define DRV_RELDATE "Dec, 2011"
+#define MLX4_FS_UDP_UC_EN (1 << 1)
+#define MLX4_FS_TCP_UC_EN (1 << 2)
+#define MLX4_FS_NUM_OF_L2_ADDR 8
+#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
+#define MLX4_FS_NUM_MCG (1 << 17)
+
+enum {
+ MLX4_FS_L2_HASH = 0,
+ MLX4_FS_L2_L3_L4_HASH,
+};
+
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -137,6 +149,7 @@ enum mlx4_resource {
RES_VLAN,
RES_EQ,
RES_COUNTER,
+ RES_FS_RULE,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -509,7 +522,7 @@ struct slave_list {
struct mlx4_resource_tracker {
spinlock_t lock;
/* tree for each resources */
- struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
};
@@ -703,6 +716,7 @@ struct mlx4_set_port_rqp_calc_context {
struct mlx4_mac_entry {
u64 mac;
+ u64 reg_id;
};
struct mlx4_port_info {
@@ -776,6 +790,7 @@ struct mlx4_priv {
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
int reserved_mtts;
+ int fs_hash_mode;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1032,7 +1047,7 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
/* resource tracker functions*/
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource resource_type,
- int resource_id, int *slave);
+ u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);
@@ -1117,6 +1132,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 225c20d4790..5f1ab105deb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -43,6 +43,7 @@
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
+#include <linux/cpu_rmap.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -75,6 +76,10 @@
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
+#define MAX_NUM_OF_FS_RULES 256
+
+#define MLX4_EN_FILTER_HASH_SHIFT 4
+#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE 512
@@ -106,7 +111,7 @@ enum {
#define MLX4_EN_MAX_TX_SIZE 8192
#define MLX4_EN_MAX_RX_SIZE 8192
-/* Minimum ring size for our page-allocation sceme to work */
+/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
@@ -227,6 +232,7 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
+ dma_addr_t dma;
u16 offset;
};
@@ -404,6 +410,19 @@ struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS 6
};
+enum mlx4_en_mclist_act {
+ MCLIST_NONE,
+ MCLIST_REM,
+ MCLIST_ADD,
+};
+
+struct mlx4_en_mc_list {
+ struct list_head list;
+ enum mlx4_en_mclist_act action;
+ u8 addr[ETH_ALEN];
+ u64 reg_id;
+};
+
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
@@ -422,6 +441,11 @@ struct mlx4_en_frag_info {
#endif
+struct ethtool_flow_id {
+ struct ethtool_rx_flow_spec flow_spec;
+ u64 id;
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -431,6 +455,7 @@ struct mlx4_en_priv {
struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
+ struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -480,6 +505,7 @@ struct mlx4_en_priv {
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_qp drop_qp;
struct work_struct mcast_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
@@ -489,8 +515,9 @@ struct mlx4_en_priv {
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
u64 stats_bitmap;
- char *mc_addrs;
- int mc_addrs_cnt;
+ struct list_head mc_list;
+ struct list_head curr_list;
+ u64 broadcast_id;
struct mlx4_en_stat_out_mbox hw_stats;
int vids[128];
bool wol;
@@ -501,6 +528,13 @@ struct mlx4_en_priv {
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
#endif
+#ifdef CONFIG_RFS_ACCEL
+ spinlock_t filters_lock;
+ int last_filter_id;
+ struct list_head filters;
+ struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
+#endif
+
};
enum mlx4_en_wol {
@@ -565,6 +599,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
@@ -578,6 +614,11 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
#endif
+#ifdef CONFIG_RFS_ACCEL
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring);
+#endif
+
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
u64 mlx4_en_mac_to_u64(u8 *addr);
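
The new filter_hash array sizes the RFS bucket table at 1 << MLX4_EN_FILTER_HASH_SHIFT, i.e. 16 chains. A small sketch of the masking that the declaration implies; the real driver may derive the flow hash differently, so treat the hash source here as an assumption:

#include <stdint.h>
#include <stdio.h>

#define FILTER_HASH_SHIFT 4			/* mirrors MLX4_EN_FILTER_HASH_SHIFT */
#define FILTER_HASH_SIZE (1 << FILTER_HASH_SHIFT)	/* 16 buckets */

/* Map an arbitrary 32-bit flow hash onto one of the 16 chains. */
static unsigned int filter_bucket(uint32_t flow_hash)
{
	return flow_hash & (FILTER_HASH_SIZE - 1);
}

int main(void)
{
	printf("hash 0x%x -> bucket %u\n", 0xdeadbeefu,
	       filter_bucket(0xdeadbeefu));
	return 0;
}
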
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a8fb52992c6..028833ffc56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -39,7 +39,6 @@
#include "mlx4.h"
#define MLX4_MAC_VALID (1ull << 63)
-#define MLX4_MAC_MASK 0xffffffffffffULL
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
@@ -75,21 +74,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+ u64 mac, int *qpn, u64 *reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
__be64 be_mac;
int err;
- qp.qpn = *qpn;
-
- mac &= 0xffffffffffffULL;
+ mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
if (err)
mlx4_warn(dev, "Failed Attaching Unicast\n");
@@ -97,19 +129,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn)
+ u64 mac, int qpn, u64 reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+ __be64 be_mac;
- qp.qpn = qpn;
- mac &= 0xffffffffffffULL;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
+ qp.qpn = qpn;
+ mac &= MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ mlx4_err(dev, "Invalid steering mode.\n");
+ }
}
static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +187,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
+ u64 reg_id;
mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
(unsigned long long) mac);
@@ -155,7 +199,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
return err;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
*qpn = info->base_qpn + index;
return 0;
}
@@ -167,7 +211,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto qp_err;
}
- err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
if (err)
goto steer_err;
@@ -177,6 +221,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto alloc_err;
}
entry->mac = mac;
+ entry->reg_id = reg_id;
err = radix_tree_insert(&info->mac_tree, *qpn, entry);
if (err)
goto insert_err;
@@ -186,7 +231,7 @@ insert_err:
kfree(entry);
alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn);
+ mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
steer_err:
mlx4_qp_release_range(dev, *qpn, 1);
@@ -206,13 +251,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
(unsigned long long) mac);
mlx4_unregister_mac(dev, port, mac);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
" qpn %d\n", port,
(unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_qp_release_range(dev, qpn, 1);
radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
@@ -359,15 +405,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
+ entry->reg_id = 0;
mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ err = mlx4_uc_steer_add(dev, port, entry->mac,
+ &qpn, &entry->reg_id);
return err;
}
@@ -803,8 +852,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
MCAST_DIRECT : MCAST_DEFAULT;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
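
Under device-managed steering, mlx4_uc_steer_add() above builds an mlx4_net_trans_rule with a single L2 spec: exact-match on the destination MAC, steering to the MAC's QP. A hedged sketch of that construction with simplified stand-in structures (field names mirror the diff; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the structures used in the hunk above. */
struct spec_eth { uint8_t dst_mac[6]; uint8_t dst_mac_msk[6]; };
struct trans_rule { int port; int qpn; int allow_loopback; struct spec_eth l2; };

/* Build a rule that steers one unicast MAC to a given QP. */
static void build_uc_rule(struct trans_rule *rule, int port, int qpn,
			  const uint8_t *mac)
{
	static const uint8_t all_ones[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	memset(rule, 0, sizeof(*rule));
	rule->port = port;
	rule->qpn = qpn;
	rule->allow_loopback = 1;
	memcpy(rule->l2.dst_mac, mac, 6);
	memcpy(rule->l2.dst_mac_msk, all_ones, 6);	/* exact-match on DMAC */
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct trans_rule rule;

	build_uc_rule(&rule, 1, 0x48, mac);
	printf("port %d qpn %d dmac %02x..%02x\n",
	       rule.port, rule.qpn, rule.l2.dst_mac[0], rule.l2.dst_mac[5]);
	return 0;
}
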
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b83bc928d52..9ee4725363d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->mtt_base = profile[i].start;
break;
case MLX4_RES_MCG:
- dev->caps.num_mgms = profile[i].num >> 1;
- dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz =
ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
- init_hca->log_mc_hash_sz = profile[i].log_num - 1;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_mgms = profile[i].num;
+ } else {
+ init_hca->log_mc_hash_sz =
+ profile[i].log_num - 1;
+ dev->caps.num_mgms = profile[i].num >> 1;
+ dev->caps.num_amgms = profile[i].num >> 1;
+ }
break;
default:
break;
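
The profile hunk splits the MCG range differently per mode: B0 steering keeps half the entries as hash (MGM) entries and half as auxiliary (AMGM) entries, with a hash table one bit smaller than the range, while device-managed steering hands the whole range to firmware. A worked example using the 2^17 entries implied by MLX4_FS_NUM_MCG:

#include <stdio.h>

int main(void)
{
	int log_num = 17, num = 1 << log_num;
	int device_managed = 0;	/* flip to 1 for DEVICE_MANAGED sizing */

	if (device_managed) {
		printf("num_mgms = %d\n", num);		/* all 131072 to FW rules */
	} else {
		printf("log_mc_hash_sz = %d\n", log_num - 1);
		printf("num_mgms = %d, num_amgms = %d\n", num >> 1, num >> 1);
	}
	return 0;
}
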
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b45d0e7f6ab..94ceddd17ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -41,13 +41,12 @@
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
+#include <linux/if_ether.h>
#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID (1ull << 63)
-#define MLX4_MAC_MASK 0x7fffffffffffffffULL
-#define ETH_ALEN 6
struct mac_res {
struct list_head list;
@@ -57,7 +56,8 @@ struct mac_res {
struct res_common {
struct list_head list;
- u32 res_id;
+ struct rb_node node;
+ u64 res_id;
int owner;
int state;
int from_state;
@@ -189,6 +189,58 @@ struct res_xrcdn {
int port;
};
+enum res_fs_rule_states {
+ RES_FS_RULE_BUSY = RES_ANY_BUSY,
+ RES_FS_RULE_ALLOCATED,
+};
+
+struct res_fs_rule {
+ struct res_common com;
+};
+
+static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct res_common *res = container_of(node, struct res_common,
+ node);
+
+ if (res_id < res->res_id)
+ node = node->rb_left;
+ else if (res_id > res->res_id)
+ node = node->rb_right;
+ else
+ return res;
+ }
+ return NULL;
+}
+
+static int res_tracker_insert(struct rb_root *root, struct res_common *res)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct res_common *this = container_of(*new, struct res_common,
+ node);
+
+ parent = *new;
+ if (res->res_id < this->res_id)
+ new = &((*new)->rb_left);
+ else if (res->res_id > this->res_id)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&res->node, parent, new);
+ rb_insert_color(&res->node, root);
+
+ return 0;
+}
+
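
res_tracker_lookup()/res_tracker_insert() above replace the radix tree so that 64-bit rule ids can be tracked. A self-contained userspace sketch of the same embedded-node idiom; a plain unbalanced BST stands in for the kernel rbtree, since the point is the container_of keying, not the rebalancing:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct node { struct node *left, *right; };
struct res { struct node node; uint64_t res_id; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct res *lookup(struct node *root, uint64_t id)
{
	while (root) {
		struct res *r = container_of(root, struct res, node);

		if (id < r->res_id)
			root = root->left;
		else if (id > r->res_id)
			root = root->right;
		else
			return r;
	}
	return NULL;
}

static int insert(struct node **root, struct res *res)
{
	while (*root) {
		struct res *this = container_of(*root, struct res, node);

		if (res->res_id < this->res_id)
			root = &(*root)->left;
		else if (res->res_id > this->res_id)
			root = &(*root)->right;
		else
			return -1;	/* -EEXIST in the kernel version */
	}
	res->node.left = res->node.right = NULL;
	*root = &res->node;
	return 0;
}

int main(void)
{
	struct node *root = NULL;
	struct res a = { .res_id = 42 }, b = { .res_id = 7 };

	insert(&root, &a);
	insert(&root, &b);
	printf("found id 7: %s\n", lookup(root, 7) ? "yes" : "no");
	return 0;
}
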
/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
@@ -201,6 +253,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MAC: return "RES_MAC";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
+ case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
};
@@ -228,8 +281,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
- INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
- GFP_ATOMIC|__GFP_NOWARN);
+ priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0 ;
@@ -277,11 +329,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id,
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
- res_id);
+ return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res)
{
@@ -307,7 +359,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
- mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
ResourceType(type), r->res_id);
if (res)
@@ -320,7 +372,7 @@ exit:
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
- int res_id, int *slave)
+ u64 res_id, int *slave)
{
struct res_common *r;
@@ -341,7 +393,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
return err;
}
-static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
{
struct res_common *r;
@@ -473,7 +525,21 @@ static struct res_common *alloc_xrcdn_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+static struct res_common *alloc_fs_rule_tr(u64 id)
+{
+ struct res_fs_rule *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_FS_RULE_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
int extra)
{
struct res_common *ret;
@@ -506,6 +572,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
break;
+ case RES_FS_RULE:
+ ret = alloc_fs_rule_tr(id);
+ break;
default:
return NULL;
}
@@ -515,7 +584,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
return ret;
}
-static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
int i;
@@ -523,7 +592,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct radix_tree_root *root = &tracker->res_tree[type];
+ struct rb_root *root = &tracker->res_tree[type];
res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
if (!res_arr)
@@ -546,7 +615,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
err = -EEXIST;
goto undo;
}
- err = radix_tree_insert(root, base + i, res_arr[i]);
+ err = res_tracker_insert(root, res_arr[i]);
if (err)
goto undo;
list_add_tail(&res_arr[i]->list,
@@ -559,7 +628,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
undo:
for (--i; i >= base; --i)
- radix_tree_delete(&tracker->res_tree[type], i);
+ rb_erase(&res_arr[i]->node, root);
spin_unlock_irq(mlx4_tlock(dev));
@@ -638,6 +707,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res)
return 0;
}
+static int remove_fs_rule_ok(struct res_fs_rule *res)
+{
+ if (res->com.state == RES_FS_RULE_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_FS_RULE_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
static int remove_cq_ok(struct res_cq *res)
{
if (res->com.state == RES_CQ_BUSY)
@@ -679,15 +758,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
return remove_counter_ok((struct res_counter *)res);
case RES_XRCD:
return remove_xrcdn_ok((struct res_xrcdn *)res);
+ case RES_FS_RULE:
+ return remove_fs_rule_ok((struct res_fs_rule *)res);
default:
return -EINVAL;
}
}
-static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
- int i;
+ u64 i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -695,7 +776,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
if (!r) {
err = -ENOENT;
goto out;
@@ -710,8 +791,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
}
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
- radix_tree_delete(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
+ rb_erase(&r->node, &tracker->res_tree[type]);
list_del(&r->list);
kfree(r);
}
@@ -733,7 +814,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -741,7 +822,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
else {
switch (state) {
case RES_QP_BUSY:
- mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);
err = -EBUSY;
break;
@@ -750,7 +831,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
if (r->com.state == RES_QP_MAPPED && !alloc)
break;
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
err = -EINVAL;
break;
@@ -759,7 +840,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.state == RES_QP_HW)
break;
else {
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
r->com.res_id);
err = -EINVAL;
}
@@ -779,7 +860,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
- *qp = (struct res_qp *)r;
+ *qp = r;
}
}
@@ -797,7 +878,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -832,7 +913,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
- *mpt = (struct res_mpt *)r;
+ *mpt = r;
}
}
@@ -850,7 +931,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -898,7 +979,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
int err;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -952,7 +1033,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -1001,7 +1082,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -1015,7 +1096,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -2695,6 +2776,60 @@ ex_put:
return err;
}
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
+ vhcr->in_modifier, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ /* detach rule*/
+ mlx4_cmd(dev, vhcr->out_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ }
+ return err;
+}
+
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ return err;
+ }
+
+ err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ return err;
+}
+
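
The attach wrapper above follows a do/track/rollback shape: run the firmware command, then register the returned id with the resource tracker, and detach again if tracking fails so no rule escapes the tracker's view. A compact sketch of that error handling; all helpers are illustrative stand-ins:

#include <stdio.h>

static int fw_attach(unsigned long long *reg_id) { *reg_id = 1; return 0; }
static int fw_detach(unsigned long long reg_id) { (void)reg_id; return 0; }
static int track_rule(unsigned long long reg_id) { (void)reg_id; return -1; }

static int attach_and_track(void)
{
	unsigned long long reg_id;
	int err = fw_attach(&reg_id);

	if (err)
		return err;
	err = track_rule(reg_id);
	if (err)
		fw_detach(reg_id);	/* roll back: untracked rules must not persist */
	return err;
}

int main(void)
{
	printf("attach_and_track -> %d\n", attach_and_track());
	return 0;
}
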
enum {
BUSY_MAX_RETRIES = 10
};
@@ -2751,7 +2886,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (r->state == RES_ANY_BUSY) {
if (print)
mlx4_dbg(dev,
- "%s id 0x%x is busy\n",
+ "%s id 0x%llx is busy\n",
ResourceType(type),
r->res_id);
++busy;
@@ -2817,8 +2952,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_QP_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_QP],
- qp->com.res_id);
+ rb_erase(&qp->com.node,
+ &tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(qp);
@@ -2888,8 +3023,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
case RES_SRQ_ALLOCATED:
__mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_SRQ],
- srqn);
+ rb_erase(&srq->com.node,
+ &tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(srq);
@@ -2954,8 +3089,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
case RES_CQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_CQ],
- cqn);
+ rb_erase(&cq->com.node,
+ &tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(cq);
@@ -3017,8 +3152,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
case RES_MPT_RESERVED:
__mlx4_mr_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MPT],
- mptn);
+ rb_erase(&mpt->com.node,
+ &tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mpt);
@@ -3086,8 +3221,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
__mlx4_free_mtt_range(dev, base,
mtt->order);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MTT],
- base);
+ rb_erase(&mtt->com.node,
+ &tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mtt);
@@ -3104,6 +3239,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
spin_unlock_irq(mlx4_tlock(dev));
}
+static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *fs_rule_list =
+ &tracker->slave_list[slave].res_list[RES_FS_RULE];
+ struct res_fs_rule *fs_rule;
+ struct res_fs_rule *tmp;
+ int state;
+ u64 base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_FS_RULE);
+ if (err)
+ mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
+ slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (fs_rule->com.owner == slave) {
+ base = fs_rule->com.res_id;
+ state = fs_rule->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_FS_RULE_ALLOCATED:
+ /* detach rule */
+ err = mlx4_cmd(dev, base, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ rb_erase(&fs_rule->com.node,
+ &tracker->res_tree[RES_FS_RULE]);
+ list_del(&fs_rule->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3133,8 +3320,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_EQ_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_EQ],
- eqn);
+ rb_erase(&eq->com.node,
+ &tracker->res_tree[RES_EQ]);
list_del(&eq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(eq);
@@ -3191,7 +3378,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
if (counter->com.owner == slave) {
index = counter->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
@@ -3220,7 +3408,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
if (xrcd->com.owner == slave) {
xrcdn = xrcd->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
+ rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
list_del(&xrcd->com.list);
kfree(xrcd);
__mlx4_xrcd_free(dev, xrcdn);
@@ -3244,5 +3432,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
rem_slave_mtts(dev, slave);
rem_slave_counters(dev, slave);
rem_slave_xrcdns(dev, slave);
+ rem_slave_fs_rule(dev, slave);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 5e313e9a252..1540ebeb866 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -422,7 +422,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
*
* Get or create the initial mac address for the device and then set that
* into the station address register. If there is an EEPROM present, then
- * we try that. If no valid mac address is found we use random_ether_addr()
+ * we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 5ffde23ac8f..38529edfe35 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -16,8 +16,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/**
- * Supports:
+/* Supports:
* KS8851 16bit MLL chip from Micrel Inc.
*/
@@ -35,7 +34,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/ks8851_mll.h>
#define DRV_NAME "ks8851_mll"
@@ -465,8 +464,7 @@ static int msg_enable;
#define BE1 0x2000 /* Byte Enable 1 */
#define BE0 0x1000 /* Byte Enable 0 */
-/**
- * register read/write calls.
+/* register read/write calls.
*
* All these calls issue transactions to access the chip's registers. They
* all require that the necessary lock is held to prevent accesses when the
@@ -1103,7 +1101,7 @@ static void ks_set_grpaddr(struct ks_net *ks)
}
} /* ks_set_grpaddr */
-/*
+/**
* ks_clear_mcast - clear multicast information
*
* @ks : The chip information
@@ -1515,6 +1513,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
+ struct ks8851_mll_platform_data *pdata;
io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1596,17 +1595,27 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks_disable_qmu(ks);
ks_setup(ks);
ks_setup_int(ks);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
data = ks_rdreg16(ks, KS_OBCR);
ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
- /**
- * If you want to use the default MAC addr,
- * comment out the 2 functions below.
- */
+ /* overwriting the default MAC address */
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ netdev_err(netdev, "No platform data\n");
+ err = -ENODEV;
+ goto err_pdata;
+ }
+ memcpy(ks->mac_addr, pdata->mac_addr, 6);
+ if (!is_valid_ether_addr(ks->mac_addr)) {
+ /* Use random MAC address if none passed */
+ eth_random_addr(ks->mac_addr);
+ netdev_info(netdev, "Using random mac address\n");
+ }
+ netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
+
+ memcpy(netdev->dev_addr, ks->mac_addr, 6);
- random_ether_addr(netdev->dev_addr);
ks_set_mac(ks, netdev->dev_addr);
id = ks_rdreg16(ks, KS_CIDER);
@@ -1615,6 +1624,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
+err_pdata:
+ unregister_netdev(netdev);
err_register:
err_get_irq:
iounmap(ks->hw_addr_cmd);
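
The probe rework above changes the MAC policy: take the address from platform data, and only fall back to a random locally-administered address when that is invalid. A userspace sketch of the policy; the two helpers approximate is_valid_ether_addr() and eth_random_addr() and are stand-ins, not the kernel functions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int valid_ether_addr(const uint8_t *a)
{
	static const uint8_t zero[6];

	return !(a[0] & 1) && memcmp(a, zero, 6) != 0; /* not multicast, not all-zero */
}

static void random_ether_addr(uint8_t *a)
{
	for (int i = 0; i < 6; i++)
		a[i] = rand() & 0xff;
	a[0] &= 0xfe;	/* clear multicast bit */
	a[0] |= 0x02;	/* set locally-administered bit */
}

int main(void)
{
	uint8_t mac[6] = { 0 };	/* pretend platform data carried all-zeros */

	if (!valid_ether_addr(mac)) {
		random_ether_addr(mac);
		puts("using random MAC address");
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
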
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index eaf9ff0262a..318fee91c79 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw)
hw->rx_stop = 2;
}
-/*
+/**
* hw_stop_rx - stop receiving
* @hw: The hardware instance.
*
@@ -4480,14 +4480,12 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
dma_buf->len = adapter->mtu;
if (!dma_buf->skb)
dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
- if (dma_buf->skb && !dma_buf->dma) {
- dma_buf->skb->dev = adapter->dev;
+ if (dma_buf->skb && !dma_buf->dma)
dma_buf->dma = pci_map_single(
adapter->pdev,
skb_tail_pointer(dma_buf->skb),
dma_buf->len,
PCI_DMA_FROMDEVICE);
- }
/* Set descriptor. */
set_rx_buf(desc, dma_buf->dma);
@@ -4881,8 +4879,8 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
left = hw_alloc_pkt(hw, skb->len, num);
if (left) {
if (left < num ||
- ((CHECKSUM_PARTIAL == skb->ip_summed) &&
- (ETH_P_IPV6 == htons(skb->protocol)))) {
+ (CHECKSUM_PARTIAL == skb->ip_summed &&
+ skb->protocol == htons(ETH_P_IPV6))) {
struct sk_buff *org_skb = skb;
skb = netdev_alloc_skb(dev, org_skb->len);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 90153fc983c..fa85cf1353f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices = 1;
msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- ncpus = num_online_cpus();
+ ncpus = netif_get_num_default_rss_queues();
if (myri10ge_max_slices == 1 || msix_cap == 0 ||
(myri10ge_max_slices == -1 && ncpus < 2))
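
netif_get_num_default_rss_queues() caps the slice count instead of scaling with every online CPU. A sketch of the sizing it implies; the cap of 8 mirrors what the helper returned when this patch was written and is an assumption, not a contract:

#include <stdio.h>

/* min(online CPUs, default RSS cap), as the helper computed it then. */
static int num_default_rss_queues(int online_cpus)
{
	return online_cpus < 8 ? online_cpus : 8;
}

int main(void)
{
	printf("32 CPUs -> %d slices\n", num_default_rss_queues(32));
	return 0;
}
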
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bb367582c1e..d958c229937 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
} while (cnt < 20);
return ret;
}
-/*
+/**
* check_pci_device_id - Checks if the device id is supported
* @id : device id
* Description: Function to check if the pci device id is supported by driver.
@@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
}
/**
- * s2io_set_mac_addr driver entry point
+ * s2io_set_mac_addr - driver entry point
*/
static int s2io_set_mac_addr(struct net_device *dev, void *p)
@@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
}
/**
- * s2io-link_test - verifies the link state of the nic
+ * s2io_link_test - verifies the link state of the nic
* @sp ; private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
@@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp - private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @data - variable that returns the result of each of the test
+ * @data: variable that returns the result of each of the test
* conducted by the driver.
* Description:
* This is one of the offline test that tests the read and write
@@ -6946,9 +6946,9 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
if (sp->rxd_mode == RXD_MODE_3B)
ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- (u64 *)&temp0_64,
- (u64 *)&temp1_64,
- (u64 *)&temp2_64,
+ &temp0_64,
+ &temp1_64,
+ &temp2_64,
size) == -ENOMEM) {
return 0;
}
@@ -7149,7 +7149,7 @@ static int s2io_card_up(struct s2io_nic *sp)
int i, ret = 0;
struct config_param *config;
struct mac_info *mac_control;
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
u16 interruptible;
/* Initialize the H/W I/O registers */
@@ -7325,7 +7325,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = (struct net_device *)ring_data->dev;
+ struct net_device *dev = ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long)rxdp->Host_Control);
int ring_no = ring_data->ring_no;
@@ -7508,7 +7508,7 @@ aggregate:
static void s2io_link(struct s2io_nic *sp, int link)
{
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
@@ -8280,7 +8280,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
return -1;
}
- *ip = (struct iphdr *)((u8 *)buffer + ip_off);
+ *ip = (struct iphdr *)(buffer + ip_off);
ip_len = (u8)((*ip)->ihl);
ip_len <<= 2;
*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 98e2c10ae08..32d06824fe3 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2346,7 +2346,7 @@ void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
for (i = 0; i < nreq; i++)
vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
@@ -2428,13 +2428,13 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
break;
pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
@@ -4059,7 +4059,7 @@ __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
- vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
+ vpath = &hldev->virtual_paths[vp_id];
if (vpath->ringh) {
status = __vxge_hw_ring_reset(vpath->ringh);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 5046a64f0fe..9e0c1eed5dc 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1922,7 +1922,7 @@ realloc:
/* misaligned, free current one and try allocating
* size + VXGE_CACHE_LINE_SIZE memory
*/
- kfree((void *) vaddr);
+ kfree(vaddr);
size += VXGE_CACHE_LINE_SIZE;
realloc_flag = 1;
goto realloc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 51387c31914..de219044351 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1134,7 +1134,7 @@ static void vxge_set_multicast(struct net_device *dev)
"%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)vdev->devh;
+ hldev = vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
return;
@@ -3131,12 +3131,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
u64 packets, bytes, multicast;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_bh(&rxstats->syncp);
packets = rxstats->rx_frms;
multicast = rxstats->rx_mcast;
bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
net_stats->rx_packets += packets;
net_stats->rx_bytes += bytes;
@@ -3146,11 +3146,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_dropped += rxstats->rx_dropped;
do {
- start = u64_stats_fetch_begin(&txstats->syncp);
+ start = u64_stats_fetch_begin_bh(&txstats->syncp);
packets = txstats->tx_frms;
bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry(&txstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
net_stats->tx_packets += packets;
net_stats->tx_bytes += bytes;
@@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths(
return 0;
if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus = num_online_cpus();
+ driver_config->g_no_cpus =
+ netif_get_num_default_rss_queues();
driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
if (!driver_config->vpath_per_dev)
@@ -3989,16 +3990,16 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
continue;
vxge_debug_ll_config(VXGE_TRACE,
"%s: MTU size - %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].mtu);
vxge_debug_init(VXGE_TRACE,
"%s: VLAN tag stripping %s", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].fifo.max_frags);
break;
}
@@ -4260,9 +4261,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
VXGE_FW_VER(maj, min, 0)) {
vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.\n"
- "Please get the latest version from "
- "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ " be used with this driver.",
VXGE_DRIVER_NAME, maj, min, bld);
return -EINVAL;
}
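
The switch to u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() keeps the sequence-retry loop: re-read the counters until the writer's sequence number is stable and even, so 64-bit stats stay consistent on 32-bit machines. A simplified single-threaded sketch (memory barriers and the bh-disable side are elided):

#include <stdio.h>

struct stats { unsigned int seq; unsigned long long packets, bytes; };

static void snapshot(const struct stats *s,
		     unsigned long long *packets, unsigned long long *bytes)
{
	unsigned int start;

	do {
		start = s->seq;		/* begin: sample the sequence */
		*packets = s->packets;
		*bytes = s->bytes;
	} while (start & 1 || start != s->seq);	/* retry if a writer was active */
}

int main(void)
{
	struct stats s = { .seq = 2, .packets = 10, .bytes = 1500 };
	unsigned long long p, b;

	snapshot(&s, &p, &b);
	printf("%llu packets, %llu bytes\n", p, b);
	return 0;
}
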
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 35f3e7552ec..36ca40f8f24 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-/**
- * #define VXGE_DEBUG_INIT: debug for initialization functions
+/* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
* #define VXGE_DEBUG_RX : debug recevice related functions
* #define VXGE_DEBUG_MEM : debug memory module
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5954fa264da..99749bd07d7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -533,8 +533,7 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
/* notify driver */
if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(
- (struct __vxge_hw_device *)hldev,
+ hldev->uld_callbacks->crit_err(hldev,
type, vp_id);
out:
@@ -1322,7 +1321,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
/* check whether it is not the end */
if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
- vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+ vxge_assert((rxdp)->host_control !=
0);
++ring->cmpl_cnt;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 928913c4f3f..f45def01a98 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
}
/**
- * nv_update_linkspeed: Setup the MAC according to the link partner
+ * nv_update_linkspeed - Setup the MAC according to the link partner
* @dev: Network device to be configured
*
* The function queries the PHY and checks if there is a link partner.
@@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
return IRQ_HANDLED;
}
-/**
- * All _optimized functions are used to help increase performance
+/* All _optimized functions are used to help increase performance
* (reduce CPU and increase throughput). They use descripter version 3,
* compiler directives, and reduce memory accesses.
*/
@@ -3776,7 +3775,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & NVREG_IRQ_RECOVER_ERROR) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -3786,7 +3785,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
if (unlikely(i > max_interrupt_work)) {
@@ -5183,6 +5182,7 @@ static const struct ethtool_ops ops = {
.get_ethtool_stats = nv_get_ethtool_stats,
.get_sset_count = nv_get_sset_count,
.self_test = nv_self_test,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* The mgmt unit and driver use a semaphore to access the phy during init */
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 083d6715335..4069edab229 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -44,7 +44,6 @@
#include <linux/of_net.h>
#include <linux/types.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <mach/board.h>
#include <mach/platform.h>
@@ -52,7 +51,6 @@
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
-#define PHYDEF_ADDR 0x00
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
@@ -416,9 +414,6 @@ static bool use_iram_for_net(struct device *dev)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
- struct net_device *ndev);
-
/*
* Structure of a TX/RX descriptors and RX status
*/
@@ -440,7 +435,7 @@ struct netdata_local {
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
- struct sk_buff *skb[ENET_TX_DESC];
+ unsigned int skblen[ENET_TX_DESC];
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
@@ -903,12 +898,11 @@ err_out:
static void __lpc_handle_xmit(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct sk_buff *skb;
u32 txcidx, *ptxstat, txstat;
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
while (pldat->last_tx_idx != txcidx) {
- skb = pldat->skb[pldat->last_tx_idx];
+ unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
/* A buffer is available, get buffer status */
ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
@@ -945,9 +939,8 @@ static void __lpc_handle_xmit(struct net_device *ndev)
} else {
/* Update stats */
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += skblen;
}
- dev_kfree_skb_irq(skb);
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
}
@@ -1132,7 +1125,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
/* Save the buffer and increment the buffer counter */
- pldat->skb[txidx] = skb;
+ pldat->skblen[txidx] = len;
pldat->num_used_tx_buffs++;
/* Start transmit */
@@ -1147,6 +1140,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irq(&pldat->lock);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1442,7 +1436,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
res->start);
netdev_dbg(ndev, "IO address size :%d\n",
res->end - res->start + 1);
- netdev_err(ndev, "IO address (mapped) :0x%p\n",
+ netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084ad22..5ae03e815ee 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_plat_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed-EBUSY
*/
@@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_setup_init_funcs - Initializes function pointers
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The buffer to store the 16-bit read.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The value to write.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_read_mac_addr - Reads MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ac4e72d529e..9dbf38c10a6 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
* pch_gbe_get_settings - Get device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
* pch_gbe_set_settings - Set device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev,
* pch_gbe_set_wol - Turn Wake-on-Lan on or off
* @netdev: Network interface device structure
* @wol: Pointer of wake-on-Lan information straucture
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev,
/**
* pch_gbe_nway_reset - Restart autonegotiation
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
* pch_gbe_set_pauseparam - Set pause paramters
* @netdev: Network interface device structure
* @pause: Pause parameters structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3787c64ee71..b1006563f73 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
*/
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
@@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
/**
* pch_gbe_alloc_queues - Allocate memory for all rings
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_init_phy - Initialize PHY
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
* @netdev: Network interface device structure
* @addr: Phy ID
* @reg: Access location
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
* @data: Pointer to a network interface device structure
- * Returns
+ * Returns:
* - IRQ_HANDLED: Our interrupt
* - IRQ_NONE: Not our interrupt
*/
@@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
* pch_gbe_clean_tx - Reclaim resources after transmit completes
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
* @rx_ring: Rx descriptor ring
* @work_done: Completed count
* @work_to_do: Request count
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
* @adapter: Board private structure
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
/**
* pch_gbe_request_irq - Allocate an interrupt line
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_up - Up GbE network device
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2018,7 +2018,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_open - Called when a network interface is made active
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2097,7 +2097,7 @@ err_setup_tx:
/**
* pch_gbe_stop - Disables a network interface
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
*/
static int pch_gbe_stop(struct net_device *netdev)
@@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev)
* pch_gbe_xmit_frame - Packet transmitting start
* @skb: Socket buffer structure
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* - NETDEV_TX_OK: Normal end
* - NETDEV_TX_BUSY: Error end
*/
@@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
* pch_gbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: Network interface device structure
* @addr: Pointer to an address structure
- * Returns
+ * Returns:
* 0: Successfully
* -EADDRNOTAVAIL: Failed
*/
@@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
* pch_gbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: Network interface device structure
* @new_mtu: New value for maximum frame size
- * Returns
+ * Returns:
* 0: Successfully
* -EINVAL: Failed
*/
@@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* pch_gbe_set_features - Reset device after features changed
* @netdev: Network interface device structure
* @features: New features
- * Returns
+ * Returns:
* 0: HW state updated successfully
*/
static int pch_gbe_set_features(struct net_device *netdev,
@@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev,
* @netdev: Network interface device structure
* @ifr: Pointer to ifr structure
* @cmd: Control command
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev)
* pch_gbe_napi_poll - NAPI receive and transfer polling callback
* @napi: Pointer of polling device struct
* @budget: The maximum number of a packet
- * Returns
+ * Returns:
* false: Exit the polling mode
* true: Continue the polling mode
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 29e23bec809..8653c3b81f8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
/**
* pch_gbe_option - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = {
* @value: value
* @opt: option
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
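
All of the pch_gbe hunks above make the same one-character fix: kernel-doc only treats a line as a section header when the keyword ends in a colon, so the bare "Returns" lines were being folded into the parameter descriptions instead of rendered as a Returns section. For reference, the shape the comments converge on (the function below is illustrative, not from the driver):

/**
 * example_probe - One-line summary of the function
 * @pdev: PCI device being probed
 * Returns:
 *	0: Successfully
 *	Negative value: Failed
 */
static int example_probe(struct pci_dev *pdev);
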
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 37ccbe54e62..eb3dfdbb642 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 79
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.79"
+#define _NETXEN_NIC_LINUX_SUBVERSION 80
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 39730403782..10468e7932d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -489,7 +489,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -511,7 +511,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
break;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
@@ -534,7 +534,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
/* read mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -577,7 +577,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
}
NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return -EIO;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
if (port == 0) {
@@ -826,7 +826,12 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
dump->len = mdump->md_dump_size;
else
dump->len = 0;
- dump->flag = mdump->md_capture_mask;
+
+ if (!mdump->md_enabled)
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = mdump->md_capture_mask;
+
dump->version = adapter->fw_version;
return 0;
}
@@ -840,8 +845,10 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
switch (val->flag) {
case NX_FORCE_FW_DUMP_KEY:
- if (!mdump->md_enabled)
- mdump->md_enabled = 1;
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return 0;
+ }
if (adapter->fw_mdump_rdy) {
netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index de96a948bb7..946160fa584 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -365,7 +365,7 @@ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
return 0;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = 0;
@@ -392,7 +392,7 @@ static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
u32 port = adapter->physical_port;
u16 board_type = adapter->ahw.board_type;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
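
The four `>` to `>=` changes above (together with the two in netxen_nic_ethtool.c) fix the same off-by-one: NETXEN_NIU_MAX_GBE_PORTS and NETXEN_NIU_MAX_XG_PORTS are port counts, so the count itself is already out of range. A standalone sketch of the corrected check, with an assumed count of 4:

#include <stdbool.h>

#define MAX_PORTS 4	/* assumed count; valid port indices are 0..3 */

static bool port_in_range(int port)
{
	/* "port > MAX_PORTS" wrongly accepted port == MAX_PORTS and
	 * indexed one register block past the last port */
	return port >= 0 && port < MAX_PORTS;
}
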
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124ef77..bc165f4d0f6 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1437,8 +1437,6 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
netdev->name, cable_len);
}
- netxen_advert_link_change(adapter, link_status);
-
/* update link parameters */
if (duplex == LINKEVENT_FULL_DUPLEX)
adapter->link_duplex = DUPLEX_FULL;
@@ -1447,6 +1445,8 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
adapter->module_type = module;
adapter->link_autoneg = autoneg;
adapter->link_speed = link_speed;
+
+ netxen_advert_link_change(adapter, link_status);
}
static void
@@ -1532,8 +1532,6 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
} else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
no_skb:
buffer->state = NETXEN_BUFFER_FREE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 8680a5dae4a..eaa1db9fec3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 28
-#define QLCNIC_LINUX_VERSIONID "5.0.28"
+#define _QLCNIC_LINUX_SUBVERSION 29
+#define QLCNIC_LINUX_VERSIONID "5.0.29"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -258,6 +258,8 @@ struct rcv_desc {
(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data) \
((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
struct status_desc {
@@ -610,7 +612,11 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_INVALID_ARGS 6
#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_NOT_PERMITTED 10
+#define QLCNIC_RCODE_NOT_IMPL 15
+#define QLCNIC_RCODE_INVALID 16
#define QLCNIC_RCODE_TIMEOUT 17
#define QLCNIC_DESTROY_CTX_RESET 0
@@ -623,6 +629,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS (1 << 21)
/*
* Context state
@@ -829,6 +836,9 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -918,6 +928,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 8db85244e8a..b8ead696141 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -53,12 +53,39 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "card response timeout.\n");
+ dev_err(&pdev->dev, "CDRP response timeout.\n");
cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ switch (cmd->rsp.cmd) {
+ case QLCNIC_RCODE_INVALID_ARGS:
+ dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_SUPPORTED:
+ case QLCNIC_RCODE_NOT_IMPL:
+ dev_err(&pdev->dev,
+ "CDRP command not supported: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_PERMITTED:
+ dev_err(&pdev->dev,
+ "CDRP requested action not permitted: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_INVALID:
+ dev_err(&pdev->dev,
+ "CDRP invalid or unknown cmd received: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_TIMEOUT:
+ dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ default:
+ dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
+ cmd->rsp.cmd);
+ }
} else if (rsp == QLCNIC_CDRP_RSP_OK) {
cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
if (cmd->rsp.arg2)
@@ -237,6 +264,9 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLCNIC_CAP0_LRO_MSS;
+
prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
msix_handler);
prq->txrx_sds_binding = nsds_rings - 1;
@@ -954,9 +984,6 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
- } else {
- dev_info(&adapter->pdev->dev,
- "%s: Get mac stats failed =%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 6ced3195aad..28a6b28192e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -588,6 +588,7 @@ enum {
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 799fd40ed03..0bcda9c51e9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1488,8 +1488,6 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
return skb;
@@ -1653,6 +1651,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
netif_receive_skb(skb);
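
Recording the firmware-reported MSS in gso_size lets the stack re-segment an LRO-aggregated frame correctly if it is forwarded rather than delivered locally. Per the qlcnic_get_lro_sts_mss() macro added above, the MSS sits in bits 32..47 of the second status word; a sketch with a made-up status value:

#include <stdint.h>

static inline uint16_t lro_sts_mss(uint64_t sts_data1)
{
	return (uint16_t)((sts_data1 >> 32) & 0xffff);
}

/* e.g. lro_sts_mss(0x000005b4deadbeefULL) == 0x05b4 == 1460 */
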
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ad98f4d7919..212c1219327 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1136,6 +1136,8 @@ static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
+ u32 capab2;
+
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1146,6 +1148,12 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
@@ -1215,6 +1223,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_napi_disable(adapter);
qlcnic_fw_destroy_ctx(adapter);
+ adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
qlcnic_release_tx_buffers(adapter);
@@ -2024,6 +2033,7 @@ qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
vh = (struct vlan_ethhdr *)skb->data;
flags = FLAGS_VLAN_TAGGED;
vlan_tci = vh->h_vlan_TCI;
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
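
The one-line addition above matters because, with an in-band 802.1Q tag, the outer ethertype is always 0x8100; the offload opcode selection that follows needs the encapsulated protocol instead. A minimal view of the header layout (field names mirror struct vlan_ethhdr for illustration):

#include <arpa/inet.h>
#include <stdint.h>

struct vlan_eth_view {
	uint8_t  h_dest[6], h_source[6];
	uint16_t h_vlan_proto;			/* htons(0x8100) on the wire */
	uint16_t h_vlan_TCI;
	uint16_t h_vlan_encapsulated_proto;	/* real L3 type, e.g. 0x0800 */
};

static uint16_t l3_protocol(const struct vlan_eth_view *vh)
{
	return ntohs(vh->h_vlan_encapsulated_proto);
}
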
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df33f1..a131d7b5d2f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,13 +18,15 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.30.00.00-01"
+#define DRV_VERSION "v1.00.00.31"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
+#define QLGE_MEZZ_SSYS_ID_068 0x0068
+#define QLGE_MEZZ_SSYS_ID_180 0x0180
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -1397,7 +1399,6 @@ struct tx_ring {
struct tx_ring_desc *q; /* descriptor list for the queue */
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
- atomic_t queue_stopped; /* Turns queue off when full. */
struct delayed_work tx_work;
struct ql_adapter *qdev;
u64 tx_packets;
@@ -1535,6 +1536,14 @@ struct nic_stats {
u64 rx_1024_to_1518_pkts;
u64 rx_1519_to_max_pkts;
u64 rx_len_err_pkts;
+ /* Receive Mac Err stats */
+ u64 rx_code_err;
+ u64 rx_oversize_err;
+ u64 rx_undersize_err;
+ u64 rx_preamble_err;
+ u64 rx_frame_len_err;
+ u64 rx_crc_err;
+ u64 rx_err_count;
/*
* These stats come from offset 500h to 5C8h
* in the XGMAC register.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8e2c2a74f3a..6f316ab2325 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -35,10 +35,152 @@
#include "qlge.h"
+struct ql_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_OFF(m) offsetof(struct ql_adapter, m)
+
+static const struct ql_stats ql_gstrings_stats[] = {
+ {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
+ {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
+ {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
+ QL_OFF(nic_stats.tx_mcast_pkts)},
+ {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
+ QL_OFF(nic_stats.tx_bcast_pkts)},
+ {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
+ QL_OFF(nic_stats.tx_ucast_pkts)},
+ {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
+ QL_OFF(nic_stats.tx_ctl_pkts)},
+ {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
+ QL_OFF(nic_stats.tx_pause_pkts)},
+ {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
+ QL_OFF(nic_stats.tx_64_pkt)},
+ {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
+ QL_OFF(nic_stats.tx_65_to_127_pkt)},
+ {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
+ QL_OFF(nic_stats.tx_128_to_255_pkt)},
+ {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
+ QL_OFF(nic_stats.tx_256_511_pkt)},
+ {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
+ QL_OFF(nic_stats.tx_512_to_1023_pkt)},
+ {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
+ QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
+ {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
+ QL_OFF(nic_stats.tx_1519_to_max_pkt)},
+ {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
+ QL_OFF(nic_stats.tx_undersize_pkt)},
+ {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
+ QL_OFF(nic_stats.tx_oversize_pkt)},
+ {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
+ {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
+ QL_OFF(nic_stats.rx_bytes_ok)},
+ {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
+ {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
+ QL_OFF(nic_stats.rx_pkts_ok)},
+ {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
+ QL_OFF(nic_stats.rx_bcast_pkts)},
+ {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
+ QL_OFF(nic_stats.rx_mcast_pkts)},
+ {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
+ QL_OFF(nic_stats.rx_ucast_pkts)},
+ {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
+ QL_OFF(nic_stats.rx_undersize_pkts)},
+ {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
+ QL_OFF(nic_stats.rx_oversize_pkts)},
+ {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
+ QL_OFF(nic_stats.rx_jabber_pkts)},
+ {"rx_undersize_fcerr_pkts",
+ QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
+ QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
+ {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
+ QL_OFF(nic_stats.rx_drop_events)},
+ {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
+ QL_OFF(nic_stats.rx_fcerr_pkts)},
+ {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
+ QL_OFF(nic_stats.rx_align_err)},
+ {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
+ QL_OFF(nic_stats.rx_symbol_err)},
+ {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
+ QL_OFF(nic_stats.rx_mac_err)},
+ {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
+ QL_OFF(nic_stats.rx_ctl_pkts)},
+ {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
+ QL_OFF(nic_stats.rx_pause_pkts)},
+ {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
+ QL_OFF(nic_stats.rx_64_pkts)},
+ {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
+ QL_OFF(nic_stats.rx_65_to_127_pkts)},
+ {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
+ QL_OFF(nic_stats.rx_128_255_pkts)},
+ {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
+ QL_OFF(nic_stats.rx_256_511_pkts)},
+ {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
+ QL_OFF(nic_stats.rx_512_to_1023_pkts)},
+ {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
+ QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
+ {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
+ QL_OFF(nic_stats.rx_1519_to_max_pkts)},
+ {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
+ QL_OFF(nic_stats.rx_len_err_pkts)},
+ {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
+ QL_OFF(nic_stats.rx_code_err)},
+ {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
+ QL_OFF(nic_stats.rx_oversize_err)},
+ {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
+ QL_OFF(nic_stats.rx_undersize_err)},
+ {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
+ QL_OFF(nic_stats.rx_preamble_err)},
+ {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
+ QL_OFF(nic_stats.rx_frame_len_err)},
+ {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
+ QL_OFF(nic_stats.rx_crc_err)},
+ {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
+ QL_OFF(nic_stats.rx_err_count)},
+ {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
+ {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
+ {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
+ {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
+ {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
+ {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
+ {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
+ {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
+ {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
+ {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
+ {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
+ {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
+ {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
+ {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
+ {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
+ {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
+ {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
+ QL_OFF(nic_stats.rx_nic_fifo_drop)},
+};
+
static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -183,73 +325,19 @@ quit:
QL_DUMP_STAT(qdev);
}
-static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
- {"tx_pkts"},
- {"tx_bytes"},
- {"tx_mcast_pkts"},
- {"tx_bcast_pkts"},
- {"tx_ucast_pkts"},
- {"tx_ctl_pkts"},
- {"tx_pause_pkts"},
- {"tx_64_pkts"},
- {"tx_65_to_127_pkts"},
- {"tx_128_to_255_pkts"},
- {"tx_256_511_pkts"},
- {"tx_512_to_1023_pkts"},
- {"tx_1024_to_1518_pkts"},
- {"tx_1519_to_max_pkts"},
- {"tx_undersize_pkts"},
- {"tx_oversize_pkts"},
- {"rx_bytes"},
- {"rx_bytes_ok"},
- {"rx_pkts"},
- {"rx_pkts_ok"},
- {"rx_bcast_pkts"},
- {"rx_mcast_pkts"},
- {"rx_ucast_pkts"},
- {"rx_undersize_pkts"},
- {"rx_oversize_pkts"},
- {"rx_jabber_pkts"},
- {"rx_undersize_fcerr_pkts"},
- {"rx_drop_events"},
- {"rx_fcerr_pkts"},
- {"rx_align_err"},
- {"rx_symbol_err"},
- {"rx_mac_err"},
- {"rx_ctl_pkts"},
- {"rx_pause_pkts"},
- {"rx_64_pkts"},
- {"rx_65_to_127_pkts"},
- {"rx_128_255_pkts"},
- {"rx_256_511_pkts"},
- {"rx_512_to_1023_pkts"},
- {"rx_1024_to_1518_pkts"},
- {"rx_1519_to_max_pkts"},
- {"rx_len_err_pkts"},
- {"tx_cbfc_pause_frames0"},
- {"tx_cbfc_pause_frames1"},
- {"tx_cbfc_pause_frames2"},
- {"tx_cbfc_pause_frames3"},
- {"tx_cbfc_pause_frames4"},
- {"tx_cbfc_pause_frames5"},
- {"tx_cbfc_pause_frames6"},
- {"tx_cbfc_pause_frames7"},
- {"rx_cbfc_pause_frames0"},
- {"rx_cbfc_pause_frames1"},
- {"rx_cbfc_pause_frames2"},
- {"rx_cbfc_pause_frames3"},
- {"rx_cbfc_pause_frames4"},
- {"rx_cbfc_pause_frames5"},
- {"rx_cbfc_pause_frames6"},
- {"rx_cbfc_pause_frames7"},
- {"rx_nic_fifo_drop"},
-};
-
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ int index;
switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_STATS:
- memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+ for (index = 0; index < QLGE_STATS_LEN; index++) {
+ memcpy(buf + index * ETH_GSTRING_LEN,
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
break;
}
}
@@ -260,7 +348,7 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return QLGE_TEST_LEN;
case ETH_SS_STATS:
- return ARRAY_SIZE(ql_stats_str_arr);
+ return QLGE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -271,69 +359,17 @@ ql_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- struct nic_stats *s = &qdev->nic_stats;
+ int index, length;
+ length = QLGE_STATS_LEN;
ql_update_stats(qdev);
- *data++ = s->tx_pkts;
- *data++ = s->tx_bytes;
- *data++ = s->tx_mcast_pkts;
- *data++ = s->tx_bcast_pkts;
- *data++ = s->tx_ucast_pkts;
- *data++ = s->tx_ctl_pkts;
- *data++ = s->tx_pause_pkts;
- *data++ = s->tx_64_pkt;
- *data++ = s->tx_65_to_127_pkt;
- *data++ = s->tx_128_to_255_pkt;
- *data++ = s->tx_256_511_pkt;
- *data++ = s->tx_512_to_1023_pkt;
- *data++ = s->tx_1024_to_1518_pkt;
- *data++ = s->tx_1519_to_max_pkt;
- *data++ = s->tx_undersize_pkt;
- *data++ = s->tx_oversize_pkt;
- *data++ = s->rx_bytes;
- *data++ = s->rx_bytes_ok;
- *data++ = s->rx_pkts;
- *data++ = s->rx_pkts_ok;
- *data++ = s->rx_bcast_pkts;
- *data++ = s->rx_mcast_pkts;
- *data++ = s->rx_ucast_pkts;
- *data++ = s->rx_undersize_pkts;
- *data++ = s->rx_oversize_pkts;
- *data++ = s->rx_jabber_pkts;
- *data++ = s->rx_undersize_fcerr_pkts;
- *data++ = s->rx_drop_events;
- *data++ = s->rx_fcerr_pkts;
- *data++ = s->rx_align_err;
- *data++ = s->rx_symbol_err;
- *data++ = s->rx_mac_err;
- *data++ = s->rx_ctl_pkts;
- *data++ = s->rx_pause_pkts;
- *data++ = s->rx_64_pkts;
- *data++ = s->rx_65_to_127_pkts;
- *data++ = s->rx_128_255_pkts;
- *data++ = s->rx_256_511_pkts;
- *data++ = s->rx_512_to_1023_pkts;
- *data++ = s->rx_1024_to_1518_pkts;
- *data++ = s->rx_1519_to_max_pkts;
- *data++ = s->rx_len_err_pkts;
- *data++ = s->tx_cbfc_pause_frames0;
- *data++ = s->tx_cbfc_pause_frames1;
- *data++ = s->tx_cbfc_pause_frames2;
- *data++ = s->tx_cbfc_pause_frames3;
- *data++ = s->tx_cbfc_pause_frames4;
- *data++ = s->tx_cbfc_pause_frames5;
- *data++ = s->tx_cbfc_pause_frames6;
- *data++ = s->tx_cbfc_pause_frames7;
- *data++ = s->rx_cbfc_pause_frames0;
- *data++ = s->rx_cbfc_pause_frames1;
- *data++ = s->rx_cbfc_pause_frames2;
- *data++ = s->rx_cbfc_pause_frames3;
- *data++ = s->rx_cbfc_pause_frames4;
- *data++ = s->rx_cbfc_pause_frames5;
- *data++ = s->rx_cbfc_pause_frames6;
- *data++ = s->rx_cbfc_pause_frames7;
- *data++ = s->rx_nic_fifo_drop;
+ for (index = 0; index < length; index++) {
+ char *p = (char *)qdev +
+ ql_gstrings_stats[index].stat_offset;
+ *data++ = (ql_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+ }
}
static int ql_get_settings(struct net_device *ndev,
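
The ethtool conversion above replaces two parallel, hand-maintained lists (one of strings, one of *data++ assignments) with a single table keyed by offsetof(), so names, counts, and values can no longer drift apart. A standalone sketch of the pattern with illustrative fields:

#include <stddef.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct dev_stats {
	uint64_t tx_pkts;
	uint64_t rx_pkts;
	uint64_t rx_crc_err;
};

struct stat_desc {
	const char *name;
	size_t offset;
};

static const struct stat_desc stat_tbl[] = {
	{ "tx_pkts",    offsetof(struct dev_stats, tx_pkts) },
	{ "rx_pkts",    offsetof(struct dev_stats, rx_pkts) },
	{ "rx_crc_err", offsetof(struct dev_stats, rx_crc_err) },
};

/* get_strings and get_ethtool_stats both walk the same table */
static void fill_values(const struct dev_stats *s, uint64_t *data)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(stat_tbl); i++)
		data[i] = *(const uint64_t *)((const char *)s + stat_tbl[i].offset);
}
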
@@ -388,30 +424,33 @@ static void ql_get_drvinfo(struct net_device *ndev,
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- /* What we support. */
- wol->supported = WAKE_MAGIC;
- /* What we've currently got set. */
- wol->wolopts = qdev->wol;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = qdev->wol;
+ }
}
static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ netif_info(qdev, drv, qdev->ndev,
+ "WOL is only supported for mezz card\n");
+ return -EOPNOTSUPP;
+ }
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
qdev->wol = wol->wolopts;
netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- if (!qdev->wol) {
- u32 wol = 0;
- status = ql_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
- status == 0 ? "cleared successfully" : "clear failed",
- wol);
- }
-
return 0;
}
@@ -528,6 +567,8 @@ static void ql_self_test(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
+
if (netif_running(ndev)) {
set_bit(QL_SELFTEST, &qdev->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 09d8d33171d..3769f5711cc 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1433,6 +1433,36 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Categorizing receive firmware frame errors */
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+{
+ struct nic_stats *stats = &qdev->nic_stats;
+
+ stats->rx_err_count++;
+
+ switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
+ case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
+ stats->rx_code_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
+ stats->rx_oversize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
+ stats->rx_undersize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
+ stats->rx_preamble_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
+ stats->rx_frame_len_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_CRC:
+ stats->rx_crc_err++;
+ break;
+ default:
+ break;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1499,15 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- rx_ring->rx_errors++;
- goto err_out;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1546,7 +1567,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct iphdr *iph =
(struct iphdr *) ((u8 *)addr + ETH_HLEN);
if (!(iph->frag_off &
- cpu_to_be16(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
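
This hunk and the two matching ones below swap cpu_to_be16()/ntohs() for htons(): iph->frag_off is carried in network byte order (__be16), so the host-order constant must be converted host-to-network before the AND. On common ABIs ntohs() and htons() compute the same 16-bit swap, so the visible effect is correct sparse annotations rather than changed behavior. A sketch of the check:

#include <arpa/inet.h>
#include <stdint.h>

static int ipv4_unfragmented(uint16_t frag_off_be)
{
	/* host-order values of the kernel's IP_MF and IP_OFFSET */
	const uint16_t IP_MF = 0x2000, IP_OFFSET = 0x1fff;

	return (frag_off_be & htons(IP_MF | IP_OFFSET)) == 0;
}
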
@@ -1593,15 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1619,7 +1631,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%s Multicast.\n",
@@ -1654,7 +1665,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1908,15 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1934,7 +1936,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -1968,7 +1969,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
@@ -1999,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
+ return (unsigned long)length;
+ }
+
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
@@ -2173,8 +2180,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if (atomic_read(&tx_ring->queue_stopped) &&
- (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
@@ -2558,10 +2564,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_info(qdev, tx_queued, qdev->ndev,
- "%s: shutting down tx queue %d du to lack of resources.\n",
+ "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
- atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2612,6 +2617,16 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
return NETDEV_TX_OK;
}
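
Together with the queue_stopped removal, the hunk above adopts the standard stop/recheck/wake idiom: stop the subqueue as soon as the ring is nearly full, then re-read the free count and undo the stop if the completion path freed entries in the window (it may already have tested the stopped state and declined to wake the queue). A compilable sketch of the idiom with illustrative thresholds:

#include <stdatomic.h>
#include <stdbool.h>

struct txq {
	atomic_int  free_cnt;
	atomic_bool stopped;
};

enum { MIN_FREE = 2, WAKE_THRESH = 64 };

static void maybe_stop(struct txq *q)
{
	if (atomic_load(&q->free_cnt) >= MIN_FREE)
		return;

	atomic_store(&q->stopped, true);	/* publish "stopped" first */
	if (atomic_load(&q->free_cnt) > WAKE_THRESH)
		atomic_store(&q->stopped, false);	/* reclaim raced us: undo */
}
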
@@ -2680,7 +2695,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring_desc++;
}
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
- atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
@@ -2703,10 +2717,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL) ||
- tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
- }
+ tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
+ goto pci_alloc_err;
+
tx_ring->q =
kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
if (tx_ring->q == NULL)
@@ -2716,6 +2729,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
err:
pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+pci_alloc_err:
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -4649,7 +4665,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, (int)num_online_cpus()));
+ min(MAX_CPUS, netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index d1827e887f4..557a26545d7 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1256,7 +1256,6 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
netif_napi_del(&lp->napi);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
@@ -1278,17 +1277,4 @@ static struct pci_driver r6040_driver = {
.remove = __devexit_p(r6040_remove_one),
};
-
-static int __init r6040_init(void)
-{
- return pci_register_driver(&r6040_driver);
-}
-
-
-static void __exit r6040_cleanup(void)
-{
- pci_unregister_driver(&r6040_driver);
-}
-
-module_init(r6040_init);
-module_exit(r6040_cleanup);
+module_pci_driver(r6040_driver);
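
module_pci_driver() generates the registration boilerplate the deleted functions spelled out by hand; its effect is roughly the following (a sketch, not the macro's verbatim kernel definition):

static int __init r6040_driver_init(void)
{
	return pci_register_driver(&r6040_driver);
}
module_init(r6040_driver_init);

static void __exit r6040_driver_exit(void)
{
	pci_unregister_driver(&r6040_driver);
}
module_exit(r6040_driver_exit);
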
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index d7a04e09110..b47d5b35024 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -46,6 +46,8 @@
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
+#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
+#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
#ifdef RTL8169_DEBUG
#define assert(expr) \
@@ -141,6 +143,9 @@ enum mac_version {
RTL_GIGA_MAC_VER_36,
RTL_GIGA_MAC_VER_37,
RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -259,6 +264,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] =
_R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_39] =
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
+ JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_40] =
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_41] =
+ _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
};
#undef _R
@@ -389,8 +402,12 @@ enum rtl8168_8101_registers {
TWSI = 0xd2,
MCU = 0xd3,
#define NOW_IS_OOB (1 << 7)
+#define TX_EMPTY (1 << 5)
+#define RX_EMPTY (1 << 4)
+#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
#define EN_NDP (1 << 3)
#define EN_OOB_RESET (1 << 2)
+#define LINK_LIST_RDY (1 << 1)
EFUSEAR = 0xdc,
#define EFUSEAR_FLAG 0x80000000
#define EFUSEAR_WRITE_CMD 0x80000000
@@ -416,6 +433,7 @@ enum rtl8168_registers {
#define ERIAR_MASK_SHIFT 12
#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
EPHY_RXER_NUM = 0x7c,
OCPDR = 0xb0, /* OCP GPHY access */
@@ -428,10 +446,14 @@ enum rtl8168_registers {
#define OCPAR_FLAG 0x80000000
#define OCPAR_GPHY_WRITE_CMD 0x8000f060
#define OCPAR_GPHY_READ_CMD 0x0000f060
+ GPHY_OCP = 0xb8,
RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
MISC = 0xf0, /* 8168e only. */
#define TXPLA_RST (1 << 29)
+#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
#define PWM_EN (1 << 22)
+#define RXDV_GATED_EN (1 << 19)
+#define EARLY_TALLY_EN (1 << 16)
};
enum rtl_register_content {
@@ -721,8 +743,8 @@ struct rtl8169_private {
u16 event_slow;
struct mdio_ops {
- void (*write)(void __iomem *, int, int);
- int (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ int (*read)(struct rtl8169_private *, int);
} mdio_ops;
struct pll_power_ops {
@@ -736,8 +758,8 @@ struct rtl8169_private {
} jumbo_ops;
struct csi_ops {
- void (*write)(void __iomem *, int, int);
- u32 (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ u32 (*read)(struct rtl8169_private *, int);
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
@@ -774,6 +796,8 @@ struct rtl8169_private {
} phy_action;
} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
+
+ u32 ocp_base;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -794,6 +818,8 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
+MODULE_FIRMWARE(FIRMWARE_8106E_1);
+MODULE_FIRMWARE(FIRMWARE_8168G_1);
static void rtl_lock_work(struct rtl8169_private *tp)
{
@@ -818,47 +844,114 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
}
}
+struct rtl_cond {
+ bool (*check)(struct rtl8169_private *);
+ const char *msg;
+};
+
+static void rtl_udelay(unsigned int d)
+{
+ udelay(d);
+}
+
+static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
+ void (*delay)(unsigned int), unsigned int d, int n,
+ bool high)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ delay(d);
+ if (c->check(tp) == high)
+ return true;
+ }
+ netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
+ c->msg, !high, n, d);
+ return false;
+}
+
+static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
+}
+
+static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+}
+
+static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, true);
+}
+
+static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, false);
+}
+
+#define DECLARE_RTL_COND(name) \
+static bool name ## _check(struct rtl8169_private *); \
+ \
+static const struct rtl_cond name = { \
+ .check = name ## _check, \
+ .msg = #name \
+}; \
+ \
+static bool name ## _check(struct rtl8169_private *tp)
+
+DECLARE_RTL_COND(rtl_ocpar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(OCPAR) & OCPAR_FLAG;
+}
+
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
- return RTL_R32(OCPDR);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ RTL_R32(OCPDR) : ~0;
}
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPDR, data);
RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
- break;
- }
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(ERIAR) & ERIAR_FLAG;
}
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W8(ERIDR, cmd);
RTL_W32(ERIAR, 0x800010e8);
msleep(2);
- for (i = 0; i < 5; i++) {
- udelay(100);
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- }
+
+ if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
+ return;
ocp_write(tp, 0x1, 0x30, 0x00000001);
}
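
DECLARE_RTL_COND() plus rtl_loop_wait() collapse the driver's many open-coded delay loops into one helper parameterized by predicate, delay primitive, period, iteration count, and the level being waited for. A standalone sketch of the pattern:

#include <stdbool.h>

struct ctx;				/* opaque device context */

struct cond {
	bool (*check)(struct ctx *);
	const char *msg;		/* logged on timeout in the driver */
};

static bool loop_wait(struct ctx *c, const struct cond *cond,
		      void (*delay)(unsigned int), unsigned int d,
		      int n, bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		delay(d);
		if (cond->check(c) == high)
			return true;
	}
	return false;			/* timed out at level !high */
}
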
@@ -872,36 +965,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-static void rtl8168_driver_start(struct rtl8169_private *tp)
+DECLARE_RTL_COND(rtl_ocp_read_cond)
{
u16 reg;
- int i;
-
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
reg = rtl8168_get_ocp_reg(tp);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if (ocp_read(tp, 0x0f, reg) & 0x00000800)
- break;
- }
+ return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
-static void rtl8168_driver_stop(struct rtl8169_private *tp)
+static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- u16 reg;
- int i;
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+}
- reg = rtl8168_get_ocp_reg(tp);
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
- break;
- }
+ rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
static int r8168dp_check_dash(struct rtl8169_private *tp)
@@ -911,21 +995,114 @@ static int r8168dp_check_dash(struct rtl8169_private *tp)
return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
}
-static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
- int i;
+ if (reg & 0xffff0001) {
+ netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ return true;
+ }
+ return false;
+}
- RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
+DECLARE_RTL_COND(rtl_ocp_gphy_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed writing to the specified
- * MII register.
- */
- if (!(RTL_R32(PHYAR) & 0x80000000))
- break;
- udelay(25);
+ return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
+}
+
+static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+}
+
+static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(GPHY_OCP, reg << 15);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
+}
+
+static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
+{
+ int val;
+
+ val = r8168_phy_ocp_read(tp, reg);
+ r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
+}
+
+static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
+}
+
+static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(OCPDR, reg << 15);
+
+ return RTL_R32(OCPDR);
+}
+
+#define OCP_STD_PHY_BASE 0xa400
+
+static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ if (reg == 0x1f) {
+ tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
+ return;
}
+
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
+}
+
+static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
+{
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
+}
+
+DECLARE_RTL_COND(rtl_phyar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(PHYAR) & 0x80000000;
+}
+
+static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
+
+ rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
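
The r8168g_mdio_*() helpers above map MII accesses onto the new GPHY_OCP window: writing MII register 0x1f selects an OCP page (value << 4 becomes the base, with 0 restoring the standard 0xa400 base), and each 16-bit MII register then lives at base + reg * 2. A sketch of that address translation, following the biasing in the code above:

enum { OCP_STD_PHY_BASE = 0xa400 };

static unsigned int mdio_to_ocp(unsigned int ocp_base, int reg)
{
	/* non-standard pages expose their registers starting at 0x10 */
	if (ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;
	return ocp_base + reg * 2;
}
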
@@ -933,23 +1110,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
udelay(20);
}
-static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i, value = -1;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int value;
- RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
+ RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
+
+ value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ RTL_R32(PHYAR) & 0xffff : ~0;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed retrieving data from
- * the specified MII register.
- */
- if (RTL_R32(PHYAR) & 0x80000000) {
- value = RTL_R32(PHYAR) & 0xffff;
- break;
- }
- udelay(25);
- }
/*
* According to hardware specs a 20us delay is required after read
* complete indication, but before sending next command.
@@ -959,45 +1129,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
+static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(OCPDR, data |
- ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
- break;
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
-static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
- (value & OCPDR_DATA_MASK));
+ r8168dp_1_mdio_access(tp, reg,
+ OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
-static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+ r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
mdelay(1);
RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
-
- return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1012,22 +1172,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
-static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
r8168dp_2_mdio_start(ioaddr);
- r8169_mdio_write(ioaddr, reg_addr, value);
+ r8169_mdio_write(tp, reg, value);
r8168dp_2_mdio_stop(ioaddr);
}
-static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int value;
r8168dp_2_mdio_start(ioaddr);
- value = r8169_mdio_read(ioaddr, reg_addr);
+ value = r8169_mdio_read(tp, reg);
r8168dp_2_mdio_stop(ioaddr);
@@ -1036,12 +1199,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
- tp->mdio_ops.write(tp->mmio_addr, location, val);
+ tp->mdio_ops.write(tp, location, val);
}
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
- return tp->mdio_ops.read(tp->mmio_addr, location);
+ return tp->mdio_ops.read(tp, location);
}
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1072,79 +1235,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
return rtl_readphy(tp, location);
}
-static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+DECLARE_RTL_COND(rtl_ephyar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(EPHYAR) & EPHYAR_FLAG;
+}
+
+static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+
+ udelay(10);
}
-static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
- u16 value = 0xffff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
- value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static
-void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(ERIDR, val);
RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
-static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(ERIAR) & ERIAR_FLAG) {
- value = RTL_R32(ERIDR);
- break;
- }
- udelay(100);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(ERIDR) : ~0;
}
-static void
-rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
+ u32 m, int type)
{
u32 val;
- val = rtl_eri_read(ioaddr, addr, type);
- rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+ val = rtl_eri_read(tp, addr, type);
+ rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
struct exgmac_reg {
@@ -1153,31 +1301,30 @@ struct exgmac_reg {
u32 val;
};
-static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
const struct exgmac_reg *r, int len)
{
while (len-- > 0) {
- rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
r++;
}
}
-static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+DECLARE_RTL_COND(rtl_efusear_cond)
{
- u8 value = 0xff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+ return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
+}
- for (i = 0; i < 300; i++) {
- if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
- value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
- break;
- }
- udelay(100);
- }
+static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- return value;
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
static u16 rtl_get_events(struct rtl8169_private *tp)
@@ -1276,48 +1423,48 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else if (RTL_R8(PHYstatus) & _100bps) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
/* Reset packet filter */
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
if (RTL_R8(PHYstatus) & _10bps) {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x4d02, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
- 0x0060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
+ ERIAR_EXGMAC);
}
}
}
@@ -1784,6 +1931,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
}
}
+DECLARE_RTL_COND(rtl_counters_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CounterAddrLow) & CounterDump;
+}
+
static void rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1792,7 +1946,6 @@ static void rtl8169_update_counters(struct net_device *dev)
struct rtl8169_counters *counters;
dma_addr_t paddr;
u32 cmd;
- int wait = 1000;
/*
* Some chips are unable to dump tally counters when the receiver
@@ -1810,13 +1963,8 @@ static void rtl8169_update_counters(struct net_device *dev)
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | CounterDump);
- while (wait--) {
- if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
- memcpy(&tp->counters, counters, sizeof(*counters));
- break;
- }
- udelay(10);
- }
+ if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
+ memcpy(&tp->counters, counters, sizeof(*counters));
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
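/*
 * Hedged recap of the tally dump handshake this hunk converts,
 * reconstructed from the visible context (the sketch name is
 * hypothetical; paddr is the DMA address of the coherent buffer):
 */
static bool rtl8169_dump_counters_sketch(struct rtl8169_private *tp,
					 dma_addr_t paddr)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 cmd = (u64)paddr & DMA_BIT_MASK(32);

	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);	/* upper half */
	RTL_W32(CounterAddrLow, cmd);			/* lower half */
	RTL_W32(CounterAddrLow, cmd | CounterDump);	/* start the dump */

	/* The NIC clears CounterDump once the write-back completes. */
	return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}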
@@ -1894,6 +2042,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168G family. */
+ { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+
/* 8168F family. */
{ 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
{ 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
@@ -1933,6 +2085,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
+ { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
+ { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
{ 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
{ 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
@@ -2186,7 +2340,7 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= regno;
break;
case PHY_READ_EFUSE:
- predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ predata = rtl8168d_efuse_read(tp, regno);
index++;
break;
case PHY_CLEAR_READCOUNT:
@@ -2626,7 +2780,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2638,7 +2791,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -2738,11 +2891,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -3010,8 +3162,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3115,7 +3266,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct phy_reg phy_reg_init[] = {
/* Channel estimation fine tune */
{ 0x1f, 0x0003 },
@@ -3189,7 +3339,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* eee setting */
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3211,6 +3361,55 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const u16 mac_ocp_patch[] = {
+ 0xe008, 0xe01b, 0xe01d, 0xe01f,
+ 0xe021, 0xe023, 0xe025, 0xe027,
+ 0x49d2, 0xf10d, 0x766c, 0x49e2,
+ 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
+
+ 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
+ 0xc707, 0x8ee1, 0x9d6c, 0xc603,
+ 0xbe00, 0xb416, 0x0076, 0xe86c,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+ 0x0000, 0xc602, 0xbe00, 0x0000,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+
+ 0x0000, 0x0000, 0x0000, 0x0000
+ };
+ u32 i;
+
+ /* Patch code for GPHY reset */
+ for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
+ r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
+ r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
+ r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
+
+ rtl_apply_firmware(tp);
+
+ if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
+
+ if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
+
+ rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
+ rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
+
+ r8168_phy_ocp_write(tp, 0xa436, 0x8012);
+ rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
+
+ rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3256,8 +3455,6 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Disable ALDPS before setting firmware */
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x18, 0x0310);
@@ -3266,13 +3463,35 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
/* EEE setting */
- rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ rtl_apply_firmware(tp);
+
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+}
+
static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3369,6 +3588,15 @@ static void rtl_hw_phy_config(struct net_device *dev)
rtl8411_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_39:
+ rtl8106e_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_40:
+ rtl8168g_1_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_41:
default:
break;
}
@@ -3426,18 +3654,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
free_netdev(dev);
}
+DECLARE_RTL_COND(rtl_phy_reset_cond)
+{
+ return tp->phy_reset_pending(tp);
+}
+
static void rtl8169_phy_reset(struct net_device *dev,
struct rtl8169_private *tp)
{
- unsigned int i;
-
tp->phy_reset_enable(tp);
- for (i = 0; i < 100; i++) {
- if (!tp->phy_reset_pending(tp))
- return;
- msleep(1);
- }
- netif_err(tp, link, dev, "PHY reset failed\n");
+ rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
@@ -3512,7 +3738,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
low >> 16 },
};
- rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3589,6 +3815,11 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ ops->write = r8168g_mdio_write;
+ ops->read = r8168g_mdio_read;
+ break;
default:
ops->write = r8169_mdio_write;
ops->read = r8169_mdio_read;
@@ -3608,6 +3839,9 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3761,7 +3995,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
- rtl_ephy_write(ioaddr, 0x19, 0xff64);
+ rtl_ephy_write(tp, 0x19, 0xff64);
if (rtl_wol_pll_power_down(tp))
return;
@@ -3830,6 +4064,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_29:
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
ops->down = r810x_pll_power_down;
ops->up = r810x_pll_power_up;
break;
@@ -3855,6 +4090,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_35:
case RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -4051,6 +4288,8 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
* No action needed for jumbo frames with 8169.
* No jumbo for 810x at all.
*/
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -4058,20 +4297,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
}
}
+DECLARE_RTL_COND(rtl_chipcmd_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(ChipCmd) & CmdReset;
+}
+
static void rtl_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
- /* Soft reset the chip. */
RTL_W8(ChipCmd, CmdReset);
- /* Check that the chip has finished the reset. */
- for (i = 0; i < 100; i++) {
- if ((RTL_R8(ChipCmd) & CmdReset) == 0)
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4125,6 +4364,20 @@ static void rtl_rx_close(struct rtl8169_private *tp)
RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
+DECLARE_RTL_COND(rtl_npq_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(TxPoll) & NPQ;
+}
+
+DECLARE_RTL_COND(rtl_txcfg_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(TxConfig) & TXCFG_EMPTY;
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -4137,16 +4390,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
tp->mac_version == RTL_GIGA_MAC_VER_28 ||
tp->mac_version == RTL_GIGA_MAC_VER_31) {
- while (RTL_R8(TxPoll) & NPQ)
- udelay(20);
+ rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36 ||
tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_40 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_41 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
- while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
- udelay(100);
+ rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
} else {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
udelay(100);
@@ -4352,15 +4605,12 @@ static void rtl_hw_start_8169(struct net_device *dev)
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
if (tp->csi_ops.write)
- tp->csi_ops.write(tp->mmio_addr, addr, value);
+ tp->csi_ops.write(tp, addr, value);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
- if (tp->csi_ops.read)
- return tp->csi_ops.read(tp->mmio_addr, addr);
- else
- return ~0;
+ return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
}
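/*
 * The ops table consulted above: its member signatures are inferred
 * from the converted r8169_csi_read/write and r8402_csi_read/write
 * helpers below, so treat this sketch as an assumption about the real
 * struct layout.
 */
struct csi_ops {
	void (*write)(struct rtl8169_private *tp, int addr, int value);
	u32 (*read)(struct rtl8169_private *tp, int addr);
};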
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
@@ -4381,73 +4631,56 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
rtl_csi_access_enable(tp, 0x27000000);
}
-static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
+DECLARE_RTL_COND(rtl_csiar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CSIAR) & CSIAR_FLAG;
+}
+
+static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
-static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
+static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -4492,13 +4725,14 @@ struct ephy_info {
u16 bits;
};
-static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
+ int len)
{
u16 w;
while (len-- > 0) {
- w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
- rtl_ephy_write(ioaddr, e->offset, w);
+ w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(tp, e->offset, w);
e++;
}
}
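/*
 * Worked example: for { .offset = 0x03, .mask = 0x0400, .bits = 0x0220 }
 * (from e_info_8168c_2 below) and a hypothetical read value of 0x0c00,
 * the value written back is (0x0c00 & ~0x0400) | 0x0220 = 0x0a20.
 */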
@@ -4582,7 +4816,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168cp[] = {
{ 0x01, 0, 0x0001 },
{ 0x02, 0x0800, 0x1000 },
@@ -4593,7 +4826,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
__rtl_hw_start_8168cp(tp);
}
@@ -4644,14 +4877,13 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
{ 0x03, 0x0400, 0x0220 }
@@ -4659,7 +4891,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
__rtl_hw_start_8168cp(tp);
}
@@ -4727,8 +4959,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
const struct ephy_info *e = e_info_8168d_4 + i;
u16 w;
- w = rtl_ephy_read(ioaddr, e->offset);
- rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
+ w = rtl_ephy_read(tp, e->offset);
+ rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
}
rtl_enable_clock_request(pdev);
@@ -4756,7 +4988,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+ rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
@@ -4782,19 +5014,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4820,16 +5051,16 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4854,10 +5085,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -4865,7 +5095,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
@@ -4875,10 +5104,39 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+
+ rtl_csi_access_enable_1(tp);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
static void rtl_hw_start_8168(struct net_device *dev)
@@ -4982,6 +5240,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8411(tp);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_start_8168g_1(tp);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -5036,7 +5299,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5056,7 +5319,7 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
rtl_hw_start_8102e_2(tp);
- rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
+ rtl_ephy_write(tp, 0x03, 0xc2f9);
}
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
@@ -5082,15 +5345,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_hw_start_8105e_1(tp);
- rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+ rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
static void rtl_hw_start_8402(struct rtl8169_private *tp)
@@ -5109,18 +5370,29 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
+ rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8106(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
static void rtl_hw_start_8101(struct net_device *dev)
@@ -5167,6 +5439,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_37:
rtl_hw_start_8402(tp);
break;
+
+ case RTL_GIGA_MAC_VER_39:
+ rtl_hw_start_8106(tp);
+ break;
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5380,7 +5656,6 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
tp->cur_tx = tp->dirty_tx = 0;
- netdev_reset_queue(tp->dev);
}
static void rtl_reset_work(struct rtl8169_private *tp)
@@ -5535,8 +5810,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
wmb();
@@ -5633,16 +5906,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-struct rtl_txc {
- int packets;
- int bytes;
-};
-
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
- struct rtl8169_stats *tx_stats = &tp->tx_stats;
unsigned int dirty_tx, tx_left;
- struct rtl_txc txc = { 0, 0 };
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -5661,24 +5927,17 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- struct sk_buff *skb = tx_skb->skb;
-
- txc.packets++;
- txc.bytes += skb->len;
- dev_kfree_skb(skb);
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
+ dev_kfree_skb(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
tx_left--;
}
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets += txc.packets;
- tx_stats->bytes += txc.bytes;
- u64_stats_update_end(&tx_stats->syncp);
-
- netdev_completed_queue(dev, txc.packets, txc.bytes);
-
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
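/*
 * Hedged sketch of the reader side pairing with the
 * u64_stats_update_begin/end above; the function name is hypothetical,
 * but the fetch/retry loop is the standard u64_stats_sync idiom used
 * from ndo_get_stats64-style handlers:
 */
static void rtl_fold_tx_stats(struct rtl8169_private *tp,
			      struct rtnl_link_stats64 *stats)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes = tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry(&tp->tx_stats.syncp, start));
}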
@@ -6435,6 +6694,67 @@ static unsigned rtl_try_msi(struct rtl8169_private *tp,
return msi;
}
+DECLARE_RTL_COND(rtl_link_list_ready_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(MCU) & LINK_LIST_RDY;
+}
+
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+
+static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 data;
+
+ tp->ocp_base = OCP_STD_PHY_BASE;
+
+ RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
+ return;
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
+ return;
+
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ msleep(1);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data &= ~(1 << 14);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data |= (1 << 15);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+}
+
+static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_init_8168g(tp);
+ break;
+
+ default:
+ break;
+ }
+}
+
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -6544,6 +6864,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_irq_disable(tp);
+ rtl_hw_initialize(tp);
+
rtl_hw_reset(tp);
rtl_ack_events(tp, 0xffff);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 79bf09b4197..af0b867a6cf 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_ARCH_R8A7740)
+static void sh_eth_select_mii(struct net_device *ndev)
+{
+ u32 value = 0x0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ value = 0x2;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ value = 0x1;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ value = 0x0;
+ break;
+ default:
+ pr_warn("PHY interface mode was not setup. Set to MII.\n");
+ value = 0x1;
+ break;
+ }
+
+ sh_eth_write(ndev, value, RMII_MII);
+}
+#endif
+
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES 1
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
}
static int sh_eth_is_gether(struct sh_eth_private *mdp);
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int cnt = 100;
+ int ret = 0;
if (sh_eth_is_gether(mdp)) {
sh_eth_write(ndev, 0x03, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt < 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
EDMR);
}
+
+out:
+ return ret;
}
static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
mdelay(1);
}
-static void sh_eth_reset(struct net_device *ndev)
-{
- int cnt = 100;
-
- sh_eth_write(ndev, EDSR_ENALL, EDSR);
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
-
- /* Table Init */
- sh_eth_write(ndev, 0x0, TDLAR);
- sh_eth_write(ndev, 0x0, TDFAR);
- sh_eth_write(ndev, 0x0, TDFXR);
- sh_eth_write(ndev, 0x0, TDFFR);
- sh_eth_write(ndev, 0x0, RDLAR);
- sh_eth_write(ndev, 0x0, RDFAR);
- sh_eth_write(ndev, 0x0, RDFXR);
- sh_eth_write(ndev, 0x0, RDFFR);
-
- /* Reset HW CRC register */
- sh_eth_reset_hw_crc(ndev);
-}
-
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tsu = 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
.hw_crc = 1,
+ .select_mii = 1,
#endif
};
+static int sh_eth_reset(struct net_device *ndev)
+{
+ int ret = 0;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+
+ /* Reset HW CRC register */
+ sh_eth_reset_hw_crc(ndev);
+
+ /* Select MII mode */
+ if (sh_eth_my_cpu_data.select_mii)
+ sh_eth_select_mii(ndev);
+out:
+ return ret;
+}
+
static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long mii;
/* reset device */
sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
mdelay(1);
- switch (mdp->phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- mii = 2;
- break;
- case PHY_INTERFACE_MODE_MII:
- mii = 1;
- break;
- case PHY_INTERFACE_MODE_RMII:
- default:
- mii = 0;
- break;
- }
- sh_eth_write(ndev, mii, RMII_MII);
+ sh_eth_select_mii(ndev);
}
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
- int cnt = 100;
+ int ret = 0;
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
+
+out:
+ return ret;
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
+ .select_mii = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
mdelay(3);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+
+ return 0;
+}
+#else
+static int sh_eth_check_reset(struct net_device *ndev)
+{
+ int ret = 0;
+ int cnt = 100;
+
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt == 0) {
+ printk(KERN_ERR "Device reset failed\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
}
#endif
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
if (mdp->rx_skbuff[i])
dev_kfree_skb(mdp->rx_skbuff[i]);
}
}
kfree(mdp->rx_skbuff);
+ mdp->rx_skbuff = NULL;
/* Free Tx skb ringbuffer */
if (mdp->tx_skbuff) {
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
}
}
kfree(mdp->tx_skbuff);
+ mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sk_buff *skb;
struct sh_eth_rxdesc *rxdesc = NULL;
struct sh_eth_txdesc *txdesc = NULL;
- int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
- int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+ int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+ int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
mdp->cur_rx = mdp->cur_tx = 0;
mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->rx_ring, 0, rx_ringsize);
/* build Rx ring buffer */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
}
}
- mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+ mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
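/*
 * Note on the dirty_rx assignment above: cur_rx and dirty_rx are
 * free-running u32 counters indexed modulo num_rx_ring. Setting
 * dirty_rx = (u32)(i - num_rx_ring) keeps dirty_rx + num_rx_ring equal
 * to the number of buffers actually allocated (i), wrapping harmlessly
 * if allocation stopped early, so the Rx path's
 * boguscnt = (dirty_rx + num_rx_ring) - cur_rx only counts usable
 * descriptors.
 */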
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->tx_ring, 0, tx_ringsize);
/* build Tx ring buffer */
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+ mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+ mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
}
/* Allocate all Rx descriptors. */
- rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->dirty_rx = 0;
/* Allocate all Tx descriptors. */
- tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
skb_ring_free:
/* Free Rx and Tx skb ring buffer */
sh_eth_ring_free(ndev);
+ mdp->tx_ring = NULL;
+ mdp->rx_ring = NULL;
return ret;
}
-static int sh_eth_dev_init(struct net_device *ndev)
+static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
+{
+ int ringsize;
+
+ if (mdp->rx_ring) {
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
+ if (mdp->tx_ring) {
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
+}
+
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
- u_int32_t rx_int_var, tx_int_var;
u32 val;
/* Soft Reset */
- sh_eth_reset(ndev);
+ ret = sh_eth_reset(ndev);
+ if (ret)
+ goto out;
/* Descriptor format */
sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Frame recv control */
sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
- rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
- tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+ sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (start) {
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_start_queue(ndev);
+ netif_start_queue(ndev);
+ }
+out:
return ret;
}
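/*
 * Design note on the new 'start' flag: sh_eth_set_ringparam() below
 * calls sh_eth_dev_init(ndev, false) so the MAC can be reprogrammed
 * after a ring resize without unmasking interrupts or kicking Rx; the
 * ethtool handler restores EESIPR and writes EDRRR_R itself once the
 * new rings are in place, while the open and tx_timeout paths keep
 * passing true.
 */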
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
- entry = mdp->dirty_tx % TX_RING_SIZE;
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
freeNum++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
@@ -1016,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
- int entry = mdp->cur_rx % RX_RING_SIZE;
- int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+ int entry = mdp->cur_rx % mdp->num_rx_ring;
+ int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
@@ -1068,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
ndev->stats.rx_bytes += pkt_len;
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
- entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
/* Refill the Rx ring buffers. */
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
- entry = mdp->dirty_rx % RX_RING_SIZE;
+ entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
/* The buffer size is rounded up to a 16-byte boundary. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
- if (entry >= RX_RING_SIZE - 1)
+ if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
@@ -1293,14 +1363,6 @@ other_irq:
return ret;
}
-static void sh_eth_timer(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- mod_timer(&mdp->timer, jiffies + (10 * HZ));
-}
-
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
@@ -1499,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
+static void sh_eth_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ ring->rx_max_pending = RX_RING_MAX;
+ ring->tx_max_pending = TX_RING_MAX;
+ ring->rx_pending = mdp->num_rx_ring;
+ ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ if (ring->tx_pending > TX_RING_MAX ||
+ ring->rx_pending > RX_RING_MAX ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_pending < RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_tx_disable(ndev);
+ /* Disable interrupts by clearing the interrupt mask. */
+ sh_eth_write(ndev, 0x0000, EESIPR);
+ /* Stop the chip's Tx and Rx processes. */
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
+ synchronize_irq(ndev->irq);
+ }
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+
+ /* Set new parameters */
+ mdp->num_rx_ring = ring->rx_pending;
+ mdp->num_tx_ring = ring->tx_pending;
+
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ netif_wake_queue(ndev);
+ }
+
+ return 0;
+}
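/*
 * Usage note (standard ethtool tooling assumed): these handlers back
 * 'ethtool -g ethX' and 'ethtool -G ethX rx N tx N'. N must fall
 * within the RX/TX_RING_MIN..RING_MAX bounds checked above, e.g.
 * 'ethtool -G eth0 rx 256 tx 256' with the limits added in sh_eth.h.
 */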
+
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
@@ -1509,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
+ .get_ringparam = sh_eth_get_ringparam,
+ .set_ringparam = sh_eth_set_ringparam,
};
/* network device open function */
@@ -1539,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev);
+ ret = sh_eth_dev_init(ndev, true);
if (ret)
goto out_free_irq;
@@ -1548,11 +1677,6 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- /* Set the timer to check for link beat. */
- init_timer(&mdp->timer);
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
-
return ret;
out_free_irq:
@@ -1577,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* tx_errors count up */
ndev->stats.tx_errors++;
- /* timer off */
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
rxdesc->status = 0;
rxdesc->addr = 0xBADF00D0;
@@ -1589,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
mdp->tx_skbuff[i] = NULL;
}
/* device init */
- sh_eth_dev_init(ndev);
-
- /* timer on */
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- add_timer(&mdp->timer);
+ sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
@@ -1612,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
if (netif_msg_tx_queued(mdp))
dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1623,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- entry = mdp->cur_tx % TX_RING_SIZE;
+ entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
@@ -1637,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
else
txdesc->buffer_length = skb->len;
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1654,7 +1771,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ringsize;
netif_stop_queue(ndev);
@@ -1673,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
/* free DMA buffer */
- ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
-
- /* free DMA buffer */
- ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+ sh_eth_free_dma_buffer(mdp);
pm_runtime_put_sync(&mdp->pdev->dev);
@@ -2275,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ether_setup(ndev);
mdp = netdev_priv(ndev);
+ mdp->num_tx_ring = TX_RING_SIZE;
+ mdp->num_rx_ring = RX_RING_SIZE;
mdp->addr = ioremap(res->start, resource_size(res));
if (mdp->addr == NULL) {
ret = -ENOMEM;
@@ -2312,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* debug message level */
mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
- mdp->post_rx = POST_RX >> (devno << 1);
- mdp->post_fw = POST_FW >> (devno << 1);
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1fc5d1..bae84fd2e73 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
#define TX_TIMEOUT (5*HZ)
#define TX_RING_SIZE 64 /* Tx ring size */
#define RX_RING_SIZE 64 /* Rx ring size */
+#define TX_RING_MIN 64
+#define RX_RING_MIN 64
+#define TX_RING_MAX 1024
+#define RX_RING_MAX 1024
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
-enum phy_offsets {
- PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
- PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
- PHY_16 = 16,
-};
-
-/* PHY_CTRL */
-enum PHY_CTRL_BIT {
- PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
- PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
- PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
-};
-#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
-
-/* PHY_STAT */
-enum PHY_STAT_BIT {
- PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
- PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
- PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
- PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
-};
-
-/* PHY_ANA */
-enum PHY_ANA_BIT {
- PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
- PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
- PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
- PHY_A_SEL = 0x001e,
-};
-/* PHY_ANL */
-enum PHY_ANL_BIT {
- PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
- PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
- PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
- PHY_L_SEL = 0x001f,
-};
-
-/* PHY_ANE */
-enum PHY_ANE_BIT {
- PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
- PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
-};
-
-/* DM9161 */
-enum PHY_16_BIT {
- PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
- PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
- PHY_16_TXselect = 0x0400,
- PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
- PHY_16_Force100LNK = 0x0080,
- PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
- PHY_16_RPDCTR_EN = 0x0010,
- PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
- PHY_16_Sleepmode = 0x0002,
- PHY_16_RemoteLoopOut = 0x0001,
-};
-
-#define POST_RX 0x08
-#define POST_FW 0x04
-#define POST0_RX (POST_RX)
-#define POST0_FW (POST_FW)
-#define POST1_RX (POST_RX >> 2)
-#define POST1_FW (POST_FW >> 2)
-#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
-
/* ARSTR */
enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
+ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
};
struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
+ u32 num_rx_ring;
+ u32 num_tx_ring;
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
struct sh_eth_rxdesc *rx_ring;
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- struct timer_list timer;
spinlock_t lock;
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- u32 rx_int_var, tx_int_var; /* interrupt control variables */
- char post_rx; /* POST receive */
- char post_fw; /* POST forward */
- struct net_device_stats tsu_stats; /* TSU forward status */
int port; /* for TSU */
int vlan_num_ids; /* for VLAN tag filter */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b95f2e1b33f..70554a1b2b0 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
* masks even though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
- if (pci_dma_supported(pci_dev, dma_mask)) {
- rc = pci_set_dma_mask(pci_dev, dma_mask);
+ if (dma_supported(&pci_dev->dev, dma_mask)) {
+ rc = dma_set_mask(&pci_dev->dev, dma_mask);
if (rc == 0)
break;
}
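As an aside, the descending-mask probe above is a generic pattern: try the widest mask the hardware claims, and halve it until the platform accepts one. A minimal sketch against the generic DMA API (hypothetical helper, not part of this patch):

#include <linux/dma-mapping.h>

/* Hypothetical helper: accept the widest DMA mask <= max_mask that the
 * platform supports, falling back towards a 31-bit mask as efx does.
 */
static int probe_dma_mask(struct device *dev, u64 max_mask)
{
	u64 mask = max_mask;

	while (mask > 0x7fffffffUL) {
		if (dma_supported(dev, mask) && dma_set_mask(dev, mask) == 0)
			return 0;	/* mask accepted */
		mask >>= 1;		/* try a narrower mask */
	}
	return -EIO;
}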
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+ rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
if (rc) {
- /* pci_set_consistent_dma_mask() is not *allowed* to
- * fail with a mask that pci_set_dma_mask() accepted,
+ /* dma_set_coherent_mask() is not *allowed* to
+ * fail with a mask that dma_set_mask() accepted,
* but just in case...
*/
netif_err(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d725a8fbe1a..182dbe2cc6e 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -136,10 +136,10 @@ enum efx_loopback_mode {
*
* Reset methods are numbered in order of increasing scope.
*
- * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
- * @RESET_TYPE_ALL: reset everything but PCI core blocks
- * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
- * @RESET_TYPE_DISABLE: disable NIC
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 03ded364c8d..10536f93b56 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -453,7 +453,7 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strncpy(ethtool_strings[i].name,
+ strlcpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
break;
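The strncpy() to strlcpy() change matters because strncpy() does not NUL-terminate when the source is at least as long as the buffer, while strlcpy() always does. A userspace illustration (assumes libbsd's strlcpy() as a stand-in; in-kernel it comes from <linux/string.h>):

#include <stdio.h>
#include <string.h>
#include <bsd/string.h>	/* userspace strlcpy(); link with -lbsd */

int main(void)
{
	char buf[4];

	strncpy(buf, "rx_packets", sizeof(buf));  /* fills buf with "rx_p": no NUL! */
	strlcpy(buf, "rx_packets", sizeof(buf));  /* "rx_" plus NUL: always terminated */
	printf("%s\n", buf);                      /* safe: prints "rx_" */
	return 0;
}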
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 3a1ca2bd154..12b573a8e82 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -25,9 +25,12 @@
#include "io.h"
#include "phy.h"
#include "workarounds.h"
+#include "selftest.h"
/* Hardware control for SFC4000 (aka Falcon). */
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
* 8 KB, 16-bit address, 32 B write block */
@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = {
EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
-static int falcon_b0_test_registers(struct efx_nic *efx)
+static int
+falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- return efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests));
+ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+ int rc, rc2;
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+ /* We need the 312 clock from the PHY to test the XMAC
+ * registers, so move into XGMII loopback if available */
+ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+ efx->loopback_mode = LOOPBACK_XGMII;
+ else
+ efx->loopback_mode = __ffs(efx->loopback_modes);
+ }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_reset_down(efx, reset_method);
+
+ tests->registers =
+ efx_nic_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
+ ? -1 : 1;
+
+ rc = falcon_reset_hw(efx, reset_method);
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
}
/**************************************************************************
@@ -1818,7 +1845,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
- .test_registers = falcon_b0_test_registers,
+ .test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
.revision = EFX_REV_FALCON_B0,
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 6106ef15dee..8333865d4c9 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -341,12 +341,12 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
/* Update derived statistics */
- mac_stats->tx_good_bytes =
- (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- mac_stats->rx_bad_bytes =
- (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
+ efx_update_diff_stat(&mac_stats->tx_good_bytes,
+ mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+ mac_stats->tx_control * 64);
+ efx_update_diff_stat(&mac_stats->rx_bad_bytes,
+ mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+ mac_stats->rx_control * 64);
}
void falcon_poll_xmac(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index fea7f730067..c3fd61f0a95 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -662,7 +662,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- unsigned int filter_idx, depth;
+ unsigned int filter_idx, depth = 0;
u32 key;
int rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 17b6463e459..fc5e7bbcbc9 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1001,12 +1001,17 @@ static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
u8 inbuf[MC_CMD_REBOOT_IN_LEN];
- /* Atomically reboot the mcfw out of the assertion handler */
+ /* If the MC is running debug firmware, it might now be
+ * waiting for a debugger to attach, but we just want it to
+ * reboot. We set a flag that makes the command a no-op if it
+ * has already done so. We don't know what return code to
+ * expect (0 or -EIO), so ignore it.
+ */
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index fb7f65b59eb..1d552f0664d 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -222,6 +222,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
attr->index = index;
attr->type = type;
attr->limit_value = limit_value;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = reader;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 0310b9f08c9..db4beed9766 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -48,8 +48,7 @@
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
-/**
- * MCDI version 1
+/* MCDI version 1
*
* Each MCDI request starts with an MCDI_HEADER, which is a 32byte
* structure, filled in by the client.
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e575359af1..cd9c0a98969 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -68,6 +68,8 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+struct efx_self_tests;
+
/**
* struct efx_special_buffer - An Efx special buffer
* @addr: CPU base address of the buffer
@@ -100,7 +102,7 @@ struct efx_special_buffer {
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
* @unmap_len: Length of this fragment to unmap
*/
struct efx_tx_buffer {
@@ -527,7 +529,7 @@ struct efx_phy_operations {
};
/**
- * @enum efx_phy_mode - PHY operating mode flags
+ * enum efx_phy_mode - PHY operating mode flags
* @PHY_MODE_NORMAL: on and should pass traffic
* @PHY_MODE_TX_DISABLED: on with TX disabled
* @PHY_MODE_LOW_POWER: set to low power through MDIO
@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_registers: Test read/write functionality of control registers
+ * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
* @revision: Hardware architecture revision
* @mem_map_size: Memory BAR mapped size
@@ -946,7 +949,7 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
- int (*test_registers)(struct efx_nic *efx);
+ int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
int revision;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 4a9a5beec8f..326d799762d 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -124,9 +124,6 @@ int efx_nic_test_registers(struct efx_nic *efx,
unsigned address = 0, i, j;
efx_oword_t mask, imask, original, reg, buf;
- /* Falcon should be in loopback to isolate the XMAC from the PHY */
- WARN_ON(!LOOPBACK_INTERNAL(efx));
-
for (i = 0; i < n_regs; ++i) {
address = regs[i].address;
mask = imask = regs[i].mask;
@@ -308,8 +305,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
- buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
- &buffer->dma_addr);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_ATOMIC);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -320,8 +317,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
if (buffer->addr) {
- pci_free_consistent(efx->pci_dev, buffer->len,
- buffer->addr, buffer->dma_addr);
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
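The conversions in this file (and in rx.c and tx.c below) all follow the same mechanical substitution from the PCI DMA wrappers to the generic DMA API; roughly (summary for reference, not part of the patch):

/* pci_alloc_consistent(pdev, len, &h)  -> dma_alloc_coherent(&pdev->dev, len, &h, GFP_ATOMIC)
 * pci_free_consistent(pdev, len, p, h) -> dma_free_coherent(&pdev->dev, len, p, h)
 * pci_map_single(pdev, p, len, PCI_DMA_TODEVICE)
 *                                       -> dma_map_single(&pdev->dev, p, len, DMA_TO_DEVICE)
 * pci_unmap_page(pdev, h, len, PCI_DMA_FROMDEVICE)
 *                                       -> dma_unmap_page(&pdev->dev, h, len, DMA_FROM_DEVICE)
 * GFP_ATOMIC is passed explicitly because pci_alloc_consistent() used it
 * implicitly.
 */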
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f48ccf6bb3b..bab5cd9f574 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -294,6 +294,24 @@ extern bool falcon_xmac_check_fault(struct efx_nic *efx);
extern int falcon_reconfigure_xmac(struct efx_nic *efx);
extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+ if ((s64)(diff - *stat) > 0)
+ *stat = diff;
+}
+
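A standalone illustration of the rule above, with made-up counter readings (not from any real NIC), showing that a racy low reading is simply ignored:

#include <stdio.h>
#include <stdint.h>

static void update_diff_stat(uint64_t *stat, uint64_t diff)
{
	if ((int64_t)(diff - *stat) > 0)	/* only ever move forwards */
		*stat = diff;
}

int main(void)
{
	uint64_t tx_good_bytes = 0;

	update_diff_stat(&tx_good_bytes, 1000);	/* first computation */
	update_diff_stat(&tx_good_bytes, 990);	/* A/B read race: ignored */
	update_diff_stat(&tx_good_bytes, 1200);	/* genuine increase */
	printf("%llu\n", (unsigned long long)tx_good_bytes);	/* prints 1200 */
	return 0;
}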
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f3dff..719319b89d7 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
rx_buf->len = skb_len - NET_IP_ALIGN;
rx_buf->flags = 0;
- rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
skb->data, rx_buf->len,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(efx->pci_dev,
- rx_buf->dma_addr))) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ rx_buf->dma_addr))) {
dev_kfree_skb_any(skb);
rx_buf->u.skb = NULL;
return -EIO;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
- dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
efx_rx_buf_size(efx),
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
- pci_unmap_page(efx->pci_dev,
+ dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr,
efx_rx_buf_size(efx),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
- rx_buf->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
}
}
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
+ *
* This will aim to fill the RX descriptor queue up to
* @rx_queue->max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index de4c0069f5b..96068d15b60 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
return rc;
}
-static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
-{
- int rc = 0;
-
- /* Test register access */
- if (efx->type->test_registers) {
- rc = efx->type->test_registers(efx);
- tests->registers = rc ? -1 : 1;
- }
-
- return rc;
-}
-
/**************************************************************************
*
* Interrupt and event queue testing
@@ -488,7 +475,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
}
netif_tx_unlock_bh(efx->net_dev);
@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
- enum reset_type reset_method = RESET_TYPE_INVISIBLE;
- int rc_test = 0, rc_reset = 0, rc;
+ int rc_test = 0, rc_reset, rc;
efx_selftest_async_cancel(efx);
@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
*/
netif_device_detach(efx->net_dev);
- mutex_lock(&efx->mac_lock);
- if (efx->loopback_modes) {
- /* We need the 312 clock from the PHY to test the XMAC
- * registers, so move into XGMII loopback if available */
- if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
- efx->loopback_mode = LOOPBACK_XGMII;
- else
- efx->loopback_mode = __ffs(efx->loopback_modes);
- }
-
- __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- /* free up all consumers of SRAM (including all the queues) */
- efx_reset_down(efx, reset_method);
-
- rc = efx_test_chip(efx, tests);
- if (rc && !rc_test)
- rc_test = rc;
+ if (efx->type->test_chip) {
+ rc_reset = efx->type->test_chip(efx, tests);
+ if (rc_reset) {
+ netif_err(efx, hw, efx->net_dev,
+ "Unable to recover from chip test\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
- /* reset the chip to recover from the register test */
- rc_reset = efx->type->reset(efx, reset_method);
+ if ((tests->registers < 0) && !rc_test)
+ rc_test = -EIO;
+ }
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
+ mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
-
- rc = efx_reset_up(efx, reset_method, rc_reset == 0);
- if (rc && !rc_reset)
- rc_reset = rc;
-
- if (rc_reset) {
- netif_err(efx, drv, efx->net_dev,
- "Unable to recover from chip test\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- return rc_reset;
- }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
rc = efx_test_phy(efx, tests, flags);
if (rc && !rc_test)
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 9f8d7cea396..6bafd216e55 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,10 +25,12 @@
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "selftest.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -154,10 +156,29 @@ static const struct efx_nic_register_test siena_register_tests[] = {
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};
-static int siena_test_registers(struct efx_nic *efx)
+static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- return efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests));
+ enum reset_type reset_method = RESET_TYPE_ALL;
+ int rc, rc2;
+
+ efx_reset_down(efx, reset_method);
+
+ /* Reset the chip immediately so that it is completely
+ * quiescent regardless of what any VF driver does.
+ */
+ rc = siena_reset_hw(efx, reset_method);
+ if (rc)
+ goto out;
+
+ tests->registers =
+ efx_nic_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
+ ? -1 : 1;
+
+ rc = siena_reset_hw(efx, reset_method);
+out:
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
}
/**************************************************************************
@@ -437,8 +458,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
- mac_stats->tx_bad_bytes);
+ efx_update_diff_stat(&mac_stats->tx_good_bytes,
+ mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
MAC_STAT(tx_packets, TX_PKTS);
MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
MAC_STAT(tx_pause, TX_PAUSE_PKTS);
@@ -471,8 +492,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
MAC_STAT(rx_bytes, RX_BYTES);
MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
- mac_stats->rx_bad_bytes);
+ efx_update_diff_stat(&mac_stats->rx_good_bytes,
+ mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
@@ -649,7 +670,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
- .test_registers = siena_test_registers,
+ .test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 94d0365b31c..9b225a7769f 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
buffer->unmap_len = 0;
buffer->unmap_single = false;
}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
struct efx_nic *efx = tx_queue->efx;
- struct pci_dev *pci_dev = efx->pci_dev;
+ struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
q_space = efx->txq_entries - 1 - fill_level;
- /* Map for DMA. Use pci_map_single rather than pci_map_page
+ /* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
*/
unmap_single = true;
- dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
/* Process all fragments */
while (1) {
- if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
- goto pci_err;
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ goto dma_err;
/* Store fields for marking in the per-fragment final
* descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
i++;
/* Map for DMA */
unmap_single = false;
- dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
DMA_TO_DEVICE);
}
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
- pci_err:
+ dma_err:
netif_err(efx, tx_err, efx->net_dev,
" TX queue %d could not map skb with %d bytes %d "
"fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Free the fragment we were mid-way through pushing */
if (unmap_len) {
if (unmap_single)
- pci_unmap_single(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
}
return rc;
@@ -651,17 +651,8 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
protocol);
if (protocol == htons(ETH_P_8021Q)) {
- /* Find the encapsulated protocol; reset network header
- * and transport header based on that. */
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
- skb_set_network_header(skb, sizeof(*veh));
- if (protocol == htons(ETH_P_IP))
- skb_set_transport_header(skb, sizeof(*veh) +
- 4 * ip_hdr(skb)->ihl);
- else if (protocol == htons(ETH_P_IPV6))
- skb_set_transport_header(skb, sizeof(*veh) +
- sizeof(struct ipv6hdr));
}
if (protocol == htons(ETH_P_IP)) {
@@ -684,20 +675,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
*/
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
-
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
u8 *base_kva, *kva;
- base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+ base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
if (base_kva == NULL) {
netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
"Unable to allocate page for TSO headers\n");
return -ENOMEM;
}
- /* pci_alloc_consistent() allocates pages. */
+ /* dma_alloc_coherent() allocates pages. */
EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +704,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh,
- struct pci_dev *pci_dev)
+ struct device *dma_dev)
{
struct efx_tso_header **p;
unsigned long base_kva;
@@ -731,7 +721,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
p = &(*p)->next;
}
- pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+ dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
@@ -743,11 +733,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
if (unlikely(!tsoh))
return NULL;
- tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+ tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
TSOH_BUFFER(tsoh), header_len,
- PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
- tsoh->dma_addr))) {
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+ tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
}
@@ -759,9 +749,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
- pci_unmap_single(tx_queue->efx->pci_dev,
+ dma_unmap_single(&tx_queue->efx->pci_dev->dev,
tsoh->dma_addr, tsoh->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
kfree(tsoh);
}
@@ -892,13 +882,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(tx_queue->efx->pci_dev,
+ dma_unmap_single(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_page(tx_queue->efx->pci_dev,
+ dma_unmap_page(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
buffer->len = 0;
@@ -927,7 +917,6 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->packet_space = st->full_packet_size;
st->out_len = skb->len - st->header_len;
st->unmap_len = 0;
st->unmap_single = false;
@@ -954,9 +943,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
int hl = st->header_len;
int len = skb_headlen(skb) - hl;
- st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
- len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+ len, DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = true;
st->unmap_len = len;
st->in_len = len;
@@ -1008,7 +997,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
buffer->continuation = !end_of_packet;
if (st->in_len == 0) {
- /* Transfer ownership of the pci mapping */
+ /* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
buffer->unmap_single = st->unmap_single;
st->unmap_len = 0;
@@ -1181,18 +1170,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
mem_err:
netif_err(efx, tx_err, efx->net_dev,
- "Out of memory for TSO headers, or PCI mapping error\n");
+ "Out of memory for TSO headers, or DMA mapping error\n");
dev_kfree_skb_any(skb);
unwind:
/* Free the DMA mapping we were in the process of writing out */
if (state.unmap_len) {
if (state.unmap_single)
- pci_unmap_single(efx->pci_dev, state.unmap_addr,
- state.unmap_len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
else
- pci_unmap_page(efx->pci_dev, state.unmap_addr,
- state.unmap_len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
}
efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1205,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
- tx_queue->efx->pci_dev);
+ &tx_queue->efx->pci_dev->dev);
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ac149d99f78..b5ba3084c7f 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -583,7 +583,7 @@ static inline void ioc3_rx(struct net_device *dev)
unsigned long *rxr;
u32 w0, err;
- rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rxr = ip->rxr; /* Ring base */
rx_entry = ip->rx_ci; /* RX consume index */
n_entry = ip->rx_pi;
@@ -903,7 +903,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
if (ip->rxr == NULL) {
/* Allocate and initialize rx ring. 4kb = 512 entries */
ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = (unsigned long *) ip->rxr;
+ rxr = ip->rxr;
if (!rxr)
printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8814b2f5d46..8d15f7a74b4 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc911x_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
return reg & PMT_CTRL_PHY_RST_;
}
-/*
+/**
* smc911x_phy_powerdown - powerdown phy
* @dev: net device
* @phy: phy address
@@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
SMC_SET_PHY_BMCR(lp, phy, bmcr);
}
-/*
+/**
* smc911x_phy_check_media - check the media status and adjust BMCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fee44935501..318adc935a5 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy)
return bmcr & BMCR_RESET;
}
-/*
+/**
* smc_phy_powerdown - powerdown phy
* @dev: net device
*
@@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev)
smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
}
-/*
+/**
* smc_phy_check_media - check the media status and adjust TCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 1466e5d2af4..62d1baf111e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1442,6 +1442,14 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
smsc911x_mac_write(pdata, ADDRL, mac_low32);
}
+static void smsc911x_disable_irq_chip(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_reg_write(pdata, INT_EN, 0);
+ smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
@@ -1494,8 +1502,7 @@ static int smsc911x_open(struct net_device *dev)
spin_unlock_irq(&pdata->mac_lock);
/* Initialise irqs, but leave all sources disabled */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
/* Set interrupt deassertion to 100uS */
intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
@@ -2215,9 +2222,6 @@ static int __devinit smsc911x_init(struct net_device *dev)
if (smsc911x_soft_reset(pdata))
return -ENODEV;
- /* Disable all interrupt sources until we bring the device up */
- smsc911x_reg_write(pdata, INT_EN, 0);
-
ether_setup(dev);
dev->flags |= IFF_MULTICAST;
netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
@@ -2434,8 +2438,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_reg_write(pdata, INT_CFG, intcfg);
/* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
@@ -2485,7 +2488,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(dev);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
SMSC_TRACE(pdata, probe,
- "MAC Address is set to random_ether_addr");
+ "MAC Address is set to eth_random_addr");
}
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fd33b21f6c9..1fcd914ec39 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1640,8 +1640,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_io_4;
/* descriptors are aligned due to the nature of pci_alloc_consistent */
- pd->tx_ring = (struct smsc9420_dma_desc *)
- (pd->rx_ring + RX_RING_SIZE);
+ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bcd54d6e94f..e2d083228f3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -95,6 +95,16 @@ struct stmmac_extra_stats {
unsigned long poll_n;
unsigned long sched_timer_n;
unsigned long normal_irq_n;
+ unsigned long mmc_tx_irq_n;
+ unsigned long mmc_rx_irq_n;
+ unsigned long mmc_rx_csum_offload_irq_n;
+ /* EEE */
+ unsigned long irq_receive_pmt_irq_n;
+ unsigned long irq_tx_path_in_lpi_mode_n;
+ unsigned long irq_tx_path_exit_lpi_mode_n;
+ unsigned long irq_rx_path_in_lpi_mode_n;
+ unsigned long irq_rx_path_exit_lpi_mode_n;
+ unsigned long phy_eee_wakeup_error_n;
};
/* CSR Frequency Access Defines*/
@@ -162,6 +172,17 @@ enum tx_dma_irq_status {
handle_tx_rx = 3,
};
+enum core_specific_irq_mask {
+ core_mmc_tx_irq = 1,
+ core_mmc_rx_irq = 2,
+ core_mmc_rx_csum_offload_irq = 4,
+ core_irq_receive_pmt_irq = 8,
+ core_irq_tx_path_in_lpi_mode = 16,
+ core_irq_tx_path_exit_lpi_mode = 32,
+ core_irq_rx_path_in_lpi_mode = 64,
+ core_irq_rx_path_exit_lpi_mode = 128,
+};
+
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -208,6 +229,10 @@ struct dma_features {
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+/* Default LPI timers */
+#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
+#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -278,7 +303,7 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (void __iomem *ioaddr);
+ int (*host_irq_status) (void __iomem *ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev, int id);
/* Flow control setting */
@@ -291,6 +316,10 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
+ void (*set_eee_mode) (void __iomem *ioaddr);
+ void (*reset_eee_mode) (void __iomem *ioaddr);
+ void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
+ void (*set_eee_pls) (void __iomem *ioaddr, int link);
};
struct mac_link {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 23478bf4ed7..f90fcb5f957 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -36,6 +36,7 @@
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
enum dwmac1000_irq_status {
+ lpiis_irq = 0x400,
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -60,6 +61,25 @@ enum power_event {
power_down = 0x00000001,
};
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+#define LPI_CTRL_STATUS 0x0030
+#define LPI_TIMER_CTRL 0x0034
+
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
(reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b5e4d02f15c..bfe02260549 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -194,26 +194,107 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
}
-static void dwmac1000_irq_status(void __iomem *ioaddr)
+static int dwmac1000_irq_status(void __iomem *ioaddr)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ int status = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ if ((intr_status & mmc_tx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ status |= core_mmc_tx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ status |= core_mmc_rx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ status |= core_mmc_rx_csum_offload_irq;
+ }
if (unlikely(intr_status & pmt_irq)) {
- CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
+ status |= core_irq_receive_pmt_irq;
}
+ /* MAC trx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpiis_irq) {
+ /* Clear the LPI interrupt by reading register 12 */
+ u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
+ status |= core_irq_tx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
+ status |= core_irq_tx_path_exit_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
+ status |= core_irq_rx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
+ status |= core_irq_rx_path_exit_lpi_mode;
+ }
+ }
+
+ return status;
+}
+
+static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ /* Enable the link status receive on RGMII, SGMII or SMII
+ * receive path and instruct the transmit to enter the LPI
+ * state. */
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (link)
+ value |= LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+{
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + LPI_TIMER_CTRL);
}
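With the defaults defined earlier (STMMAC_DEFAULT_LIT_LS_TIMER = 0x3E8, i.e. 1000 ms, and STMMAC_DEFAULT_TWT_LS_TIMER = 0), the packed register value works out as:

/* value = (0 & 0xffff) | ((0x3E8 & 0x7ff) << 16) = 0x03E80000
 * TW occupies bits 15:0 and LS occupies bits 26:16 of LPI_TIMER_CTRL.
 */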
static const struct stmmac_ops dwmac1000_ops = {
@@ -226,6 +307,10 @@ static const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
+ .set_eee_mode = dwmac1000_set_eee_mode,
+ .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_eee_timer = dwmac1000_set_eee_timer,
+ .set_eee_pls = dwmac1000_set_eee_pls,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 19e0f4eed2b..f83210e7c22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,9 +72,9 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
return 0;
}
-static void dwmac100_irq_status(void __iomem *ioaddr)
+static int dwmac100_irq_status(void __iomem *ioaddr)
{
- return;
+ return 0;
}
static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 6e0360f9cfd..e678ce39d01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -70,6 +70,7 @@
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
/* DMA Status register defines */
+#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dc20c56efc9..ab4c376cb27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -87,6 +87,12 @@ struct stmmac_priv {
#endif
int clk_csr;
int synopsys_id;
+ struct timer_list eee_ctrl_timer;
+ bool tx_path_in_lpi_mode;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
};
extern int phyaddr;
@@ -104,6 +110,8 @@ int stmmac_dvr_remove(struct net_device *ndev);
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
void __iomem *addr);
+void stmmac_disable_eee_mode(struct stmmac_priv *priv);
+bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_HAVE_CLK
static inline int stmmac_clk_enable(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ce431846fc6..76fd61aa005 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -93,6 +93,15 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(poll_n),
STMMAC_STAT(sched_timer_n),
STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(mmc_tx_irq_n),
+ STMMAC_STAT(mmc_rx_irq_n),
+ STMMAC_STAT(mmc_rx_csum_offload_irq_n),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
+ STMMAC_STAT(phy_eee_wakeup_error_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -366,6 +376,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+ if (val)
+ priv->xstats.phy_eee_wakeup_error_n = val;
+ }
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -464,6 +479,46 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
+static int stmmac_ethtool_op_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled)
+ stmmac_disable_eee_mode(priv);
+ else {
+ /* We are asking to enable EEE, but it is safe to
+ * re-verify everything by invoking eee_init, which
+ * returns false on failure.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -480,6 +535,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
+ .get_eee = stmmac_ethtool_op_get_eee,
+ .set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ea3003edde1..f6b04c1a367 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -133,6 +133,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -161,6 +167,8 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@ -229,6 +237,85 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
phydev->speed);
}
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Check and enter in LPI mode */
+ if ((priv->dirty_tx == priv->cur_tx) &&
+ (priv->tx_path_in_lpi_mode == false))
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Exit and disable EEE if we are in the LPI state. */
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer
+ * @arg: the driver private structure, passed as an unsigned long
+ * Description:
+ * If there is no data transfer and if we are not in LPI state,
+ * then MAC Transmitter can be moved to LPI state.
+ */
+static void stmmac_eee_ctrl_timer(unsigned long arg)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+}
+
+/**
+ * stmmac_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If EEE support was enabled while configuring the driver, and if the
+ * GMAC actually supports it (from the HW cap reg) and the PHY can also
+ * manage EEE, then enable the LPI state and start the timer used to
+ * verify whether the tx path can enter the LPI state.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ bool ret = false;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->dma_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ goto out;
+
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ STMMAC_DEFAULT_LIT_LS_TIMER,
+ priv->tx_lpi_timer);
+
+ pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+out:
+ return ret;
+}
+
+static void stmmac_eee_adjust(struct stmmac_priv *priv)
+{
+ /* Once EEE has been initialised we have to modify the
+ * PLS bit in the LPI ctrl & status reg according to the
+ * PHY link status.
+ */
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -249,6 +336,7 @@ static void stmmac_adjust_link(struct net_device *dev)
phydev->addr, phydev->link);
spin_lock_irqsave(&priv->lock, flags);
+
if (phydev->link) {
u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -315,6 +403,8 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ stmmac_eee_adjust(priv);
+
spin_unlock_irqrestore(&priv->lock, flags);
DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@ -332,7 +422,7 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[MII_BUS_ID_SIZE + 3];
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
priv->oldlink = 0;
@@ -346,11 +436,12 @@ static int stmmac_init_phy(struct net_device *dev)
snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
priv->plat->bus_id);
- snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
+ interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -677,7 +768,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->hw->desc->release_tx_desc(p);
- entry = (++priv->dirty_tx) % txsize;
+ priv->dirty_tx++;
}
if (unlikely(netif_queue_stopped(priv->dev) &&
stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@ -689,6 +780,11 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+ }
spin_unlock(&priv->tx_lock);
}
@@ -1027,6 +1123,17 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Request the IRQ lines */
+ if (priv->lpi_irq != -ENXIO) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto open_error_lpiirq;
+ }
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
@@ -1062,12 +1169,19 @@ static int stmmac_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
+ priv->eee_enabled = stmmac_eee_init(priv);
+
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
+open_error_lpiirq:
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+
open_error_wolirq:
free_irq(dev->irq, dev);
@@ -1093,6 +1207,9 @@ static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
/* Stop and disconnect the PHY */
if (priv->phydev) {
phy_stop(priv->phydev);
@@ -1115,6 +1232,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
+ if (priv->lpi_irq != -ENXIO)
+ free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1164,6 +1283,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&priv->tx_lock);
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
@@ -1311,7 +1433,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
display_ring(priv->dma_rx, rxsize);
}
#endif
- count = 0;
while (!priv->hw->desc->get_rx_owner(p)) {
int status;
@@ -1544,10 +1665,37 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->plat->has_gmac)
- /* To handle GMAC own interrupts */
- priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
+ /* To handle GMAC own interrupts */
+ if (priv->plat->has_gmac) {
+ int status = priv->hw->mac->host_irq_status((void __iomem *)
+ dev->base_addr);
+ if (unlikely(status)) {
+ if (status & core_mmc_tx_irq)
+ priv->xstats.mmc_tx_irq_n++;
+ if (status & core_mmc_rx_irq)
+ priv->xstats.mmc_rx_irq_n++;
+ if (status & core_mmc_rx_csum_offload_irq)
+ priv->xstats.mmc_rx_csum_offload_irq_n++;
+ if (status & core_irq_receive_pmt_irq)
+ priv->xstats.irq_receive_pmt_irq_n++;
+
+ /* For LPI we need to save the tx status */
+ if (status & core_irq_tx_path_in_lpi_mode) {
+ priv->xstats.irq_tx_path_in_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = true;
+ }
+ if (status & core_irq_tx_path_exit_lpi_mode) {
+ priv->xstats.irq_tx_path_exit_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = false;
+ }
+ if (status & core_irq_rx_path_in_lpi_mode)
+ priv->xstats.irq_rx_path_in_lpi_mode_n++;
+ if (status & core_irq_rx_path_exit_lpi_mode)
+ priv->xstats.irq_rx_path_exit_lpi_mode_n++;
+ }
+ }
+ /* To handle DMA interrupts */
stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
@@ -2133,42 +2281,38 @@ static int __init stmmac_cmdline_opt(char *str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
+ if (kstrtoint(opt + 6, 0, &debug))
goto err;
} else if (!strncmp(opt, "phyaddr:", 8)) {
- if (strict_strtoul(opt + 8, 0,
- (unsigned long *)&phyaddr))
+ if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
} else if (!strncmp(opt, "dma_txsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_txsize))
+ if (kstrtoint(opt + 11, 0, &dma_txsize))
goto err;
} else if (!strncmp(opt, "dma_rxsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_rxsize))
+ if (kstrtoint(opt + 11, 0, &dma_rxsize))
goto err;
} else if (!strncmp(opt, "buf_sz:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&buf_sz))
+ if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
} else if (!strncmp(opt, "tc:", 3)) {
- if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
+ if (kstrtoint(opt + 3, 0, &tc))
goto err;
} else if (!strncmp(opt, "watchdog:", 9)) {
- if (strict_strtoul(opt + 9, 0,
- (unsigned long *)&watchdog))
+ if (kstrtoint(opt + 9, 0, &watchdog))
goto err;
} else if (!strncmp(opt, "flow_ctrl:", 10)) {
- if (strict_strtoul(opt + 10, 0,
- (unsigned long *)&flow_ctrl))
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
goto err;
} else if (!strncmp(opt, "pause:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 6)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
#ifdef CONFIG_STMMAC_TIMER
} else if (!strncmp(opt, "tmrate:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&tmrate))
+ if (kstrtoint(opt + 7, 0, &tmrate))
goto err;
#endif
}
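These conversions are not just API churn: strict_strtoul() through an (unsigned long *) cast of an int writes 8 bytes into a 4-byte object on 64-bit kernels, while kstrtoint() range-checks and stores an int directly. Deriving the compare length and the value offset from one key string would also have prevented the "eee_timer:" length mismatch fixed above; a self-contained sketch (parse_opt_int() is illustrative, not driver code):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Hedged sketch: keep the strncmp length and kstrtoint offset in lock-step. */
static int parse_opt_int(const char *opt, const char *key, int *val)
{
	size_t klen = strlen(key);

	if (strncmp(opt, key, klen))
		return -ENOENT;			/* not this option */
	return kstrtoint(opt + klen, 0, val);	/* base 0 = auto-detect */
}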
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index cf826e6b6aa..13afb8edfad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -125,7 +125,7 @@ err_out_req_reg_failed:
}
/**
- * stmmac_dvr_remove
+ * stmmac_pci_remove
*
* @pdev: PCI device pointer
* Description: this function calls the main driver free routine to release the net resources
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 680d2b8dfe2..cd01ee7ecef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -49,7 +49,9 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* are provided. All other properties should be added
* once needed on other platforms.
*/
- if (of_device_is_compatible(np, "st,spear600-gmac")) {
+ if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac")) {
plat->has_gmac = 1;
plat->pmt = 1;
}
@@ -156,6 +158,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
if (priv->wol_irq == -ENXIO)
priv->wol_irq = priv->dev->irq;
+ priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+
platform_set_drvdata(pdev, priv->dev);
pr_debug("STMMAC platform driver registration completed");
@@ -190,7 +194,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- iounmap((void *)priv->ioaddr);
+ iounmap((void __force __iomem *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -250,7 +254,9 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
static const struct of_device_id stmmac_dt_ids[] = {
- { .compatible = "st,spear600-gmac", },
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
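With three entries in the table the driver now binds to generic Synopsys DWMAC device-tree nodes as well as the SPEAr600 integration. When probe code needs to know which entry matched, the usual pattern is of_match_device(); a minimal sketch (example_probe() is illustrative, not part of this patch):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hedged sketch: recover the matching stmmac_dt_ids entry at probe time. */
static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(stmmac_dt_ids, &pdev->dev);
	if (!match)
		return -EINVAL;		/* not probed via device tree */
	dev_info(&pdev->dev, "bound via compatible \"%s\"\n",
		 match->compatible);
	return 0;
}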
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8c726b7004d..c2a0fe39326 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3335,6 +3335,10 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
+ if (!addr) {
+ __free_page(page);
+ return -ENOMEM;
+ }
niu_hash_page(rp, page, addr);
if (rp->rbr_blocks_per_page > 1)
@@ -3513,7 +3517,7 @@ static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
err = 0;
while (index < (rp->rbr_table_size - blocks_per_page)) {
err = niu_rbr_add_page(np, rp, mask, index);
- if (err)
+ if (unlikely(err))
break;
index += blocks_per_page;
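The zero test is correct here because niu's ops->map_page abstraction reports failure as 0. Drivers that call the DMA API directly should use dma_mapping_error() instead, since a bus address of 0 can be legitimate on some platforms; a hedged sketch of that generic idiom (map_rx_page() is illustrative, not niu code):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hedged sketch: generic DMA API failure check for a mapped RX page. */
static int map_rx_page(struct device *dev, struct page *page,
		       dma_addr_t *addr)
{
	*addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		__free_page(page);	/* caller no longer owns the page */
		return -ENOMEM;
	}
	return 0;
}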
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 2a83fc57edb..967fe8cb476 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -233,7 +233,6 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
continue;
bp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, ETH_FRAME_LEN);
@@ -838,7 +837,6 @@ static void bigmac_rx(struct bigmac *bp)
RX_BUF_ALLOC_SIZE - 34,
DMA_FROM_DEVICE);
bp->rx_skbs[elem] = new_skb;
- new_skb->dev = bp->dev;
skb_put(new_skb, ETH_FRAME_LEN);
skb_reserve(new_skb, 34);
this->rx_addr =
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3cf4ab75583..9ae12d0c963 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -752,7 +752,6 @@ static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size
if (likely(skb)) {
unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
skb_reserve(skb, offset);
- skb->dev = dev;
}
return skb;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index dfc00c4683e..73f341b8bef 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1249,7 +1249,6 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
static void happy_meal_init_rings(struct happy_meal *hp)
{
struct hmeal_init_block *hb = hp->happy_block;
- struct net_device *dev = hp->dev;
int i;
HMD(("happy_meal_init_rings: counters to zero, "));
@@ -1270,7 +1269,6 @@ static void happy_meal_init_rings(struct happy_meal *hp)
continue;
}
hp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
@@ -2031,7 +2029,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- new_skb->dev = dev;
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7d4a040d84a..aeded7ff1c8 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -441,7 +441,7 @@ static void qe_rx(struct sunqe *qep)
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ skb_copy_to_linear_data(skb, this_qbuf,
len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 447a6932cab..6ce9edd95c0 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev)
#define bdx_disable_interrupts(priv) \
do { WRITE_REG(priv, regIMR, 0); } while (0)
-/* bdx_fifo_init
- * create TX/RX descriptor fifo for host-NIC communication.
+/**
+ * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication.
+ * @priv: NIC private structure
+ * @f: fifo to initialize
+ * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
+ * @reg_XXX: offsets of registers relative to base address
+ *
* 1K extra space is allocated at the end of the fifo to simplify
* processing of descriptors that wrap around the fifo's end
- * @priv - NIC private structure
- * @f - fifo to initialize
- * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
- * @reg_XXX - offsets of registers relative to base address
*
* Returns 0 on success, negative value on failure
*
@@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
RET(0);
}
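The remaining hunks in this file repeat the same transformation: plain block comments become kernel-doc, with a dash after the function name and colon-terminated @parameter lines. For reference, the canonical shape being converged on (a generic template, not one of tehuti's functions):

/**
 * example_func - one-line summary, no trailing period
 * @arg1: what the first argument means
 * @arg2: what the second argument means
 *
 * Longer description in complete sentences, after a blank line.
 *
 * Returns 0 on success, a negative errno on failure.
 */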
-/* bdx_fifo_free - free all resources used by fifo
- * @priv - NIC private structure
- * @f - fifo to release
+/**
+ * bdx_fifo_free - free all resources used by fifo
+ * @priv: NIC private structure
+ * @f: fifo to release
*/
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
@@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
RET();
}
-/*
+/**
* bdx_link_changed - notifies OS about hw link state.
- * @bdx_priv - hw adapter structure
+ * @priv: hw adapter structure
*/
static void bdx_link_changed(struct bdx_priv *priv)
{
@@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
}
-/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
- * @irq - interrupt number
- * @ndev - network device
- * @regs - CPU registers
+/**
+ * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC
+ * @irq: interrupt number
+ * @dev: network device
*
* Returns IRQ_NONE if it was not our interrupt, IRQ_HANDLED otherwise
*
@@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* bdx_fw_load - loads firmware to NIC
- * @priv - NIC private structure
+/**
+ * bdx_fw_load - loads firmware to NIC
+ * @priv: NIC private structure
+ *
* Firmware is loaded via TXD fifo, so it must be initialized first.
+ * Firmware must be loaded once per NIC, not per PCI device provided by the NIC
+ * (a NIC can have several). So all drivers use a semaphore register to choose one
@@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
RET();
}
-/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
- * @priv - NIC private structure
+/**
+ * bdx_hw_start - inits registers and starts HW's Rx and Tx engines
+ * @priv: NIC private structure
*/
static int bdx_hw_start(struct bdx_priv *priv)
{
@@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
RET(-EOPNOTSUPP);
}
-/*
+/**
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
- * by passing VLAN filter table to hardware
- * @ndev network device
- * @vid VLAN vid
- * @op add or kill operation
+ * @ndev: network device
+ * @vid: VLAN vid
+ * @enable: add or kill operation
+ *
+ * Passes VLAN filter table to hardware
*/
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
@@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
RET();
}
-/*
+/**
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
- * @ndev network device
- * @vid VLAN vid to add
+ * @ndev: network device
+ * @vid: VLAN vid to add
*/
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
@@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
return 0;
}
-/*
+/**
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
- * @ndev network device
- * @vid VLAN vid to kill
+ * @ndev: network device
+ * @vid: VLAN vid to kill
*/
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
@@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
* Rx Init *
*************************************************************************/
-/* bdx_rx_init - initialize RX all related HW and SW resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_init - initialize all RX-related HW and SW resources
+ * @priv: NIC private structure
*
* Returns 0 on success, negative value on failure
*
@@ -1016,9 +1023,10 @@ err_mem:
return -ENOMEM;
}
-/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
- * @priv - NIC private structure
- * @f - RXF fifo
+/**
+ * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
+ * @priv: NIC private structure
+ * @f: RXF fifo
*/
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
@@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
}
}
-/* bdx_rx_free - release all Rx resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_free - release all Rx resources
+ * @priv: NIC private structure
+ *
* It assumes that Rx is disabled in HW
*/
static void bdx_rx_free(struct bdx_priv *priv)
@@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv)
* Rx Engine *
*************************************************************************/
-/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+/**
+ * bdx_rx_alloc_skbs - fill rxf fifo with new skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ *
+ * It allocates skbs, builds RXF descriptors and pushes them into the RXF fifo.
* skb's virtual and physical addresses are stored in skb db.
* To calculate free space, func uses cached values of RPTR and WPTR
@@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
RET();
}
-/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
+/**
+ * bdx_rx_receive - receives full packets from the RXD fifo and passes them to the OS
* NOTE: special treatment is given to non-contiguous descriptors
* that start near the end, wrap around and continue at the beginning. A second
* part is copied right after the first, and then the descriptor is interpreted
* as normal. The fifo has extra space to allow such operations.
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ * @budget: maximum number of packets to receive
*/
/* TBD: replace the memcpy call with explicit inline asm */
@@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db)
return db->size - taken;
}
-/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
- * @d - tx data base
- * @ptr - read or write pointer
+/**
+ * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
+ * @db: tx data base
+ * @pptr: read or write pointer
*/
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
@@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
*pptr = db->start;
}
-/* bdx_tx_db_inc_rptr - increment read pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_rptr - increment read pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
@@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db)
__bdx_tx_db_ptr_next(db, &db->rptr);
}
-/* bdx_tx_db_inc_rptr - increment write pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_wptr - increment write pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
@@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db)
a result of write */
}
-/* bdx_tx_db_init - creates and initializes tx db
- * @d - tx data base
- * @sz_type - size of tx fifo
+/**
+ * bdx_tx_db_init - creates and initializes tx db
+ * @d: tx data base
+ * @sz_type: size of tx fifo
+ *
* Returns 0 on success, error code otherwise
*/
static int bdx_tx_db_init(struct txdb *d, int sz_type)
@@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type)
return 0;
}
-/* bdx_tx_db_close - closes tx db and frees all memory
- * @d - tx data base
+/**
+ * bdx_tx_db_close - closes tx db and frees all memory
+ * @d: tx data base
*/
static void bdx_tx_db_close(struct txdb *d)
{
@@ -1463,9 +1483,11 @@ static struct {
u16 qwords; /* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
-/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
- * @priv - NIC private structure
- * @skb - socket buffer to map
+/**
+ * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: TX descriptor to use
*
* It makes dma mappings for skb's data blocks and writes them to PBL of
* new tx descriptor. It also stores them in the tx db, so they could be
@@ -1562,9 +1584,10 @@ err_mem:
return -ENOMEM;
}
-/*
+/**
* bdx_tx_space - calculates available space in TX fifo
- * @priv - NIC private structure
+ * @priv: NIC private structure
+ *
* Returns available space in TX fifo in bytes
*/
static inline int bdx_tx_space(struct bdx_priv *priv)
@@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
return fsize;
}
-/* bdx_tx_transmit - send packet to NIC
- * @skb - packet to send
- * ndev - network device assigned to NIC
+/**
+ * bdx_tx_transmit - send packet to NIC
+ * @skb: packet to send
+ * @ndev: network device assigned to NIC
* Return codes:
* o NETDEV_TX_OK everything ok.
* o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
- * @priv - bdx adapter
+/**
+ * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
+ * @priv: bdx adapter
+ *
* It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
* that those packets were sent
*/
@@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
spin_unlock(&priv->tx_lock);
}
-/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+/**
+ * bdx_tx_free_skbs - frees all skbs from TXD fifo.
* It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
*/
static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv)
bdx_tx_db_close(&priv->txdb);
}
-/* bdx_tx_push_desc - push descriptor to TxD fifo
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc - push descriptor to TxD fifo
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* Pushes desc to TxD fifo and overlaps it if needed.
* NOTE: this func does not check for available space. This is the responsibility
@@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}
-/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* NOTE: this func does check for available space and, if necessary, waits for
* the NIC to read the existing data before writing the new one.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6685bbb5705..1e5d85b06e7 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -27,6 +27,7 @@
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_data/cpsw.h>
@@ -494,11 +495,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- ret = clk_enable(priv->clk);
- if (ret < 0) {
- dev_err(priv->dev, "unable to turn on device clock\n");
- return ret;
- }
+ pm_runtime_get_sync(&priv->pdev->dev);
reg = __raw_readl(&priv->regs->id_ver);
@@ -569,7 +566,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_carrier_off(priv->ndev);
cpsw_ale_stop(priv->ale);
for_each_slave(priv, cpsw_slave_stop, priv);
- clk_disable(priv->clk);
+ pm_runtime_put_sync(&priv->pdev->dev);
return 0;
}
@@ -748,7 +745,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
pr_info("Detected MACID = %pM", priv->mac_addr);
} else {
- random_ether_addr(priv->mac_addr);
+ eth_random_addr(priv->mac_addr);
pr_info("Random MACID = %pM", priv->mac_addr);
}
@@ -763,10 +760,12 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
- priv->clk = clk_get(&pdev->dev, NULL);
+ pm_runtime_enable(&pdev->dev);
+ priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(priv->dev, "failed to get device clock)\n");
- ret = -EBUSY;
+ dev_err(&pdev->dev, "fck is not found\n");
+ ret = -ENODEV;
+ goto clean_slave_ret;
}
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -935,6 +934,8 @@ clean_cpsw_iores_ret:
resource_size(priv->cpsw_res));
clean_clk_ret:
clk_put(priv->clk);
+clean_slave_ret:
+ pm_runtime_disable(&pdev->dev);
kfree(priv->slaves);
clean_ndev_ret:
free_netdev(ndev);
@@ -959,6 +960,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
resource_size(priv->cpsw_res));
release_mem_region(priv->cpsw_ss_res->start,
resource_size(priv->cpsw_ss_res));
+ pm_runtime_disable(&pdev->dev);
clk_put(priv->clk);
kfree(priv->slaves);
free_netdev(ndev);
@@ -973,6 +975,8 @@ static int cpsw_suspend(struct device *dev)
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -981,6 +985,7 @@ static int cpsw_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
if (netif_running(ndev))
cpsw_ndo_open(ndev);
return 0;
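Replacing clk_enable()/clk_disable() with runtime PM hands clock and power-domain management to the platform core; the driver only states when it needs the hardware, and every get must be balanced by a put before pm_runtime_disable(). A hedged sketch of the pairing (example_open()/example_stop() are illustrative hooks, not cpsw code):

#include <linux/pm_runtime.h>

/* Hedged sketch: the get/put discipline cpsw now relies on. */
static int example_open(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* powers the device up if suspended */
	/* ... program registers, start queues ... */
	return 0;
}

static int example_stop(struct device *dev)
{
	/* ... stop queues, quiesce hardware ... */
	pm_runtime_put_sync(dev);	/* allows the core to power it down */
	return 0;
}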
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4da93a5d7ec..fce89a0ab06 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -57,7 +57,12 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -339,6 +344,9 @@ struct emac_priv {
u32 rx_addr_type;
atomic_t cur_tx;
const char *phy_id;
+#ifdef CONFIG_OF
+ struct device_node *phy_node;
+#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -346,10 +354,6 @@ struct emac_priv {
void (*int_disable) (void);
};
-/* clock frequency for EMAC */
-static struct clk *emac_clk;
-static unsigned long emac_bus_frequency;
-
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
"No error", "SOP error", "Ownership bit not set in SOP buffer",
@@ -375,7 +379,7 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
/**
- * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * emac_dump_regs - Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
*
* Dumps important EMAC registers to the debug terminal
@@ -466,7 +470,7 @@ static void emac_dump_regs(struct emac_priv *priv)
}
/**
- * emac_get_drvinfo: Get EMAC driver information
+ * emac_get_drvinfo - Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
* @info: ethtool info structure containing name and version
*
@@ -481,7 +485,7 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings: Get EMAC settings
+ * emac_get_settings - Get EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -500,7 +504,7 @@ static int emac_get_settings(struct net_device *ndev,
}
/**
- * emac_set_settings: Set EMAC settings
+ * emac_set_settings - Set EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -518,7 +522,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
}
/**
- * emac_get_coalesce : Get interrupt coalesce settings for this device
+ * emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev: The DaVinci EMAC network adapter
* @coal: ethtool coalesce settings structure
*
@@ -536,7 +540,7 @@ static int emac_get_coalesce(struct net_device *ndev,
}
/**
- * emac_set_coalesce : Set interrupt coalesce settings for this device
+ * emac_set_coalesce - Set interrupt coalesce settings for this device
* @ndev: The DaVinci EMAC network adapter
* @coal: ethtool coalesce settings structure
*
@@ -614,11 +618,9 @@ static int emac_set_coalesce(struct net_device *ndev,
}
-/**
- * ethtool_ops: DaVinci EMAC Ethtool structure
+/* ethtool_ops: DaVinci EMAC Ethtool structure
*
* Ethtool support for EMAC adapter
- *
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
@@ -631,7 +633,7 @@ static const struct ethtool_ops ethtool_ops = {
};
/**
- * emac_update_phystatus: Update Phy status
+ * emac_update_phystatus - Update Phy status
* @priv: The DaVinci EMAC private adapter structure
*
* Updates phy status and takes action for network queue if required
@@ -697,7 +699,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
}
/**
- * hash_get: Calculate hash value from mac address
+ * hash_get - Calculate hash value from mac address
* @addr: mac address to calculate the hash for
*
* Calculates hash value from mac address
@@ -723,9 +725,9 @@ static u32 hash_get(u8 *addr)
}
/**
- * hash_add: Hash function to add mac addr from hash table
+ * hash_add - Hash function to add mac addr to the hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to add to the hash table
*
* Adds mac address to the internal hash table
*
@@ -765,9 +767,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
}
/**
- * hash_del: Hash function to delete mac addr from hash table
+ * hash_del - Hash function to delete mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to delete from hash table
*
* Removes mac address from the internal hash table
*
@@ -807,7 +809,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr)
#define EMAC_ALL_MULTI_CLR 3
/**
- * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
* @priv: The DaVinci EMAC private adapter structure
* @action: multicast operation to perform
* @mac_addr: mac address to set
@@ -855,7 +857,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
}
/**
- * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * emac_dev_mcast_set - Set multicast address in the EMAC adapter
* @ndev: The DaVinci EMAC network adapter
*
* Set multicast addresses in EMAC adapter
@@ -901,7 +903,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
*************************************************************************/
/**
- * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * emac_int_disable - Disable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Disable EMAC interrupt on the adapter
@@ -931,7 +933,7 @@ static void emac_int_disable(struct emac_priv *priv)
}
/**
- * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * emac_int_enable - Enable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Enable EMAC interrupt on the adapter
@@ -967,7 +969,7 @@ static void emac_int_enable(struct emac_priv *priv)
}
/**
- * emac_irq: EMAC interrupt handler
+ * emac_irq - EMAC interrupt handler
* @irq: interrupt number
* @dev_id: EMAC network adapter data structure ptr
*
@@ -1060,7 +1062,7 @@ static void emac_tx_handler(void *token, int len, int status)
}
/**
- * emac_dev_xmit: EMAC Transmit function
+ * emac_dev_xmit - EMAC Transmit function
* @skb: SKB pointer
* @ndev: The DaVinci EMAC network adapter
*
@@ -1111,7 +1113,7 @@ fail_tx:
}
/**
- * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * emac_dev_tx_timeout - EMAC Transmit timeout function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system detects that a skb timeout period has expired
@@ -1138,7 +1140,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
}
/**
- * emac_set_type0addr: Set EMAC Type0 mac address
+ * emac_set_type0addr - Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1165,7 +1167,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type1addr: Set EMAC Type1 mac address
+ * emac_set_type1addr - Set EMAC Type1 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1187,7 +1189,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type2addr: Set EMAC Type2 mac address
+ * emac_set_type2addr - Set EMAC Type2 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1213,7 +1215,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
}
/**
- * emac_setmac: Set mac address in the adapter (internal function)
+ * emac_setmac - Set mac address in the adapter (internal function)
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1242,7 +1244,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_dev_setmac_addr: Set mac address in the adapter
+ * emac_dev_setmac_addr - Set mac address in the adapter
* @ndev: The DaVinci EMAC network adapter
* @addr: MAC address to set in device
*
@@ -1277,7 +1279,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
* Enables EMAC hardware for packet processing - enables PHY, enables RX
@@ -1347,7 +1349,7 @@ static int emac_hw_enable(struct emac_priv *priv)
}
/**
- * emac_poll: EMAC NAPI Poll function
+ * emac_poll - EMAC NAPI Poll function
* @ndev: The DaVinci EMAC network adapter
* @budget: Number of receive packets to process (as told by NAPI layer)
*
@@ -1430,7 +1432,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * emac_poll_controller: EMAC Poll controller function
+ * emac_poll_controller - EMAC Poll controller function
* @ndev: The DaVinci EMAC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1489,7 +1491,7 @@ static void emac_adjust_link(struct net_device *ndev)
*************************************************************************/
/**
- * emac_devioctl: EMAC adapter ioctl
+ * emac_devioctl - EMAC adapter ioctl
* @ndev: The DaVinci EMAC network adapter
* @ifrq: request parameter
* @cmd: command parameter
@@ -1516,7 +1518,7 @@ static int match_first_device(struct device *dev, void *data)
}
/**
- * emac_dev_open: EMAC device open
+ * emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to start the interface. We init TX/RX channels
@@ -1535,6 +1537,8 @@ static int emac_dev_open(struct net_device *ndev)
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
+ pm_runtime_get(&priv->pdev->dev);
+
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
ndev->dev_addr[cnt] = priv->mac_addr[cnt];
@@ -1604,7 +1608,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id);
ret = PTR_ERR(priv->phydev);
priv->phydev = NULL;
- return ret;
+ goto err;
}
priv->link = 0;
@@ -1645,11 +1649,15 @@ rollback:
res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
m = res->end;
}
- return -EBUSY;
+
+ ret = -EBUSY;
+err:
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
/**
- * emac_dev_stop: EMAC device stop
+ * emac_dev_stop - EMAC device stop
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to stop or down the interface. We stop the network
@@ -1687,11 +1695,12 @@ static int emac_dev_stop(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+ pm_runtime_put(&priv->pdev->dev);
return 0;
}
/**
- * emac_dev_getnetstats: EMAC get statistics function
+ * emac_dev_getnetstats - EMAC get statistics function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to get statistics from the device.
@@ -1762,8 +1771,79 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+#ifdef CONFIG_OF
+static struct emac_platform_data
+ *davinci_emac_of_get_pdata(struct platform_device *pdev,
+ struct emac_priv *priv)
+{
+ struct device_node *np;
+ struct emac_platform_data *pdata = NULL;
+ const u8 *mac_addr;
+ u32 data;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto nodata;
+ }
+
+ np = pdev->dev.of_node;
+ if (!np)
+ goto nodata;
+ else
+ pdata->version = EMAC_VERSION_2;
+
+ if (!is_valid_ether_addr(pdata->mac_addr)) {
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+ }
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data);
+ if (!ret)
+ pdata->ctrl_reg_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
+ &data);
+ if (!ret)
+ pdata->ctrl_mod_reg_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data);
+ if (!ret)
+ pdata->ctrl_ram_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data);
+ if (!ret)
+ pdata->ctrl_ram_size = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data);
+ if (!ret)
+ pdata->rmii_en = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data);
+ if (!ret)
+ pdata->no_bd_ram = data;
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node)
+ pdata->phy_id = "";
+
+ pdev->dev.platform_data = pdata;
+nodata:
+ return pdata;
+}
+#else
+static struct emac_platform_data
+ *davinci_emac_of_get_pdata(struct platform_device *pdev,
+ struct emac_priv *priv)
+{
+ return pdev->dev.platform_data;
+}
+#endif
/**
- * davinci_emac_probe: EMAC device probe
+ * davinci_emac_probe - EMAC device probe
* @pdev: The DaVinci EMAC device that we are probing
*
* Called when probing for emac devices. We get details of instances and
@@ -1780,6 +1860,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct emac_platform_data *pdata;
struct device *emac_dev;
struct cpdma_params dma_params;
+ struct clk *emac_clk;
+ unsigned long emac_bus_frequency;
+
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
@@ -1788,12 +1871,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ clk_put(emac_clk);
+
/* TODO: Probe PHY here if possible */
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
rc = -ENOMEM;
- goto free_clk;
+ goto no_ndev;
}
platform_set_drvdata(pdev, ndev);
@@ -1804,7 +1889,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
- pdata = pdev->dev.platform_data;
+ pdata = davinci_emac_of_get_pdata(pdev, priv);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
rc = -ENODEV;
@@ -1909,15 +1994,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
- clk_enable(emac_clk);
-
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
- goto netdev_reg_err;
+ goto no_irq_res;
}
@@ -1926,10 +2009,12 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
return 0;
-netdev_reg_err:
- clk_disable(emac_clk);
no_irq_res:
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
@@ -1943,13 +2028,12 @@ no_dma:
probe_quit:
free_netdev(ndev);
-free_clk:
- clk_put(emac_clk);
+no_ndev:
return rc;
}
/**
- * davinci_emac_remove: EMAC device remove
+ * davinci_emac_remove - EMAC device remove
* @pdev: The DaVinci EMAC device that we are removing
*
* Called when removing the device driver. We disable clock usage and release
@@ -1978,9 +2062,6 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
iounmap(priv->remap_addr);
free_netdev(ndev);
- clk_disable(emac_clk);
- clk_put(emac_clk);
-
return 0;
}
@@ -1992,8 +2073,6 @@ static int davinci_emac_suspend(struct device *dev)
if (netif_running(ndev))
emac_dev_stop(ndev);
- clk_disable(emac_clk);
-
return 0;
}
@@ -2002,8 +2081,6 @@ static int davinci_emac_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- clk_enable(emac_clk);
-
if (netif_running(ndev))
emac_dev_open(ndev);
@@ -2015,21 +2092,26 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
.resume = davinci_emac_resume,
};
-/**
- * davinci_emac_driver: EMAC platform driver structure
- */
+static const struct of_device_id davinci_emac_of_match[] = {
+ {.compatible = "ti,davinci-dm6467-emac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
+
+/* davinci_emac_driver: EMAC platform driver structure */
static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
.owner = THIS_MODULE,
.pm = &davinci_emac_pm_ops,
+ .of_match_table = of_match_ptr(davinci_emac_of_match),
},
.probe = davinci_emac_probe,
.remove = __devexit_p(davinci_emac_remove),
};
/**
- * davinci_emac_init: EMAC driver module init
+ * davinci_emac_init - EMAC driver module init
*
* Called when initializing the driver. We register the driver with
* the platform.
@@ -2041,7 +2123,7 @@ static int __init davinci_emac_init(void)
late_initcall(davinci_emac_init);
/**
- * davinci_emac_exit: EMAC driver module exit
+ * davinci_emac_exit - EMAC driver module exit
*
* Called when exiting the driver completely. We unregister the driver with
* the platform and exit
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index e4e47088e26..cd7ee204e94 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -34,6 +34,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
/*
@@ -321,7 +322,9 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, pdev->id);
- data->clk = clk_get(dev, NULL);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ data->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -329,8 +332,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
goto bail_out;
}
- clk_enable(data->clk);
-
dev_set_drvdata(dev, data);
data->dev = dev;
spin_lock_init(&data->lock);
@@ -378,10 +379,10 @@ bail_out:
if (data->bus)
mdiobus_free(data->bus);
- if (data->clk) {
- clk_disable(data->clk);
+ if (data->clk)
clk_put(data->clk);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
kfree(data);
@@ -396,10 +397,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
if (data->bus)
mdiobus_free(data->bus);
- if (data->clk) {
- clk_disable(data->clk);
+ if (data->clk)
clk_put(data->clk);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
dev_set_drvdata(dev, NULL);
@@ -421,8 +422,7 @@ static int davinci_mdio_suspend(struct device *dev)
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- if (data->clk)
- clk_disable(data->clk);
+ pm_runtime_put_sync(data->dev);
data->suspended = true;
spin_unlock(&data->lock);
@@ -436,8 +436,7 @@ static int davinci_mdio_resume(struct device *dev)
u32 ctrl;
spin_lock(&data->lock);
- if (data->clk)
- clk_enable(data->clk);
+ pm_runtime_get_sync(data->dev);
/* restart the scan state machine */
ctrl = __raw_readl(&data->regs->control);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 83b4b388ad4..4e2a1628484 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -123,6 +123,7 @@ struct tile_net_comps {
/* The transmit wake timer for a given cpu and echannel. */
struct tile_net_tx_wake {
+ int tx_queue_idx;
struct hrtimer timer;
struct net_device *dev;
};
@@ -573,12 +574,14 @@ static void add_comp(gxio_mpipe_equeue_t *equeue,
comps->comp_next++;
}
-static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
+ int tx_queue_idx)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
struct tile_net_priv *priv = netdev_priv(dev);
+ struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
- hrtimer_start(&info->tx_wake[priv->echannel].timer,
+ hrtimer_start(&tx_wake->timer,
ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
HRTIMER_MODE_REL_PINNED);
}
@@ -587,7 +590,7 @@ static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
{
struct tile_net_tx_wake *tx_wake =
container_of(t, struct tile_net_tx_wake, timer);
- netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+ netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx);
return HRTIMER_NORESTART;
}
@@ -1218,6 +1221,7 @@ static int tile_net_open(struct net_device *dev)
hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
+ tx_wake->tx_queue_idx = cpu;
tx_wake->timer.function = tile_net_handle_tx_wake_timer;
tx_wake->dev = dev;
}
@@ -1291,6 +1295,7 @@ static inline void *tile_net_frag_buf(skb_frag_t *f)
* stop the queue and schedule the tx_wake timer.
*/
static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+ int tx_queue_idx,
struct tile_net_comps *comps,
gxio_mpipe_equeue_t *equeue,
int num_edescs)
@@ -1313,8 +1318,8 @@ static s64 tile_net_equeue_try_reserve(struct net_device *dev,
}
/* Still nothing; give up and stop the queue for a short while. */
- netif_stop_subqueue(dev, smp_processor_id());
- tile_net_schedule_tx_wake_timer(dev);
+ netif_stop_subqueue(dev, tx_queue_idx);
+ tile_net_schedule_tx_wake_timer(dev, tx_queue_idx);
return -1;
}
@@ -1328,11 +1333,12 @@ static s64 tile_net_equeue_try_reserve(struct net_device *dev,
static int tso_count_edescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int data_len = skb->data_len;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
- long f_size = -1; /* size of the current fragment */
- long f_used = -1; /* bytes used from the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int num_edescs = 0;
int segment;
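The new initial values treat the linear area as fragment number -1 with the protocol headers already consumed: f_size starts at skb->hdr_len (the linear byte count) and f_used at sh_len, while data_len becomes pure payload. A worked example under assumed numbers: with sh_len = 54 (14 Ethernet + 20 IP + 20 TCP), hdr_len = 200 and one 1000-byte page fragment, data_len = 1000 + 200 - 54 = 1146 payload bytes; at gso_size = 400 that is three segments (400 + 400 + 346), the first of which draws its initial 146 bytes from the linear area before moving to the fragment. The unchanged per-segment loop then handles payload carried in the header buffer with no special case.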
@@ -1377,13 +1383,14 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
struct skb_shared_info *sh = skb_shinfo(skb);
struct iphdr *ih;
struct tcphdr *th;
- unsigned int data_len = skb->data_len;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned char *data = skb->data;
- unsigned int ih_off, th_off, sh_len, p_len;
+ unsigned int ih_off, th_off, p_len;
unsigned int isum_seed, tsum_seed, id, seq;
long f_id = -1; /* id of the current fragment */
- long f_size = -1; /* size of the current fragment */
- long f_used = -1; /* bytes used from the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int segment;
@@ -1392,14 +1399,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
th = tcp_hdr(skb);
ih_off = skb_network_offset(skb);
th_off = skb_transport_offset(skb);
- sh_len = th_off + tcp_hdrlen(skb);
p_len = sh->gso_size;
/* Set up seed values for IP and TCP csum and initialize id and seq. */
isum_seed = ((0xFFFF - ih->check) +
(0xFFFF - ih->tot_len) +
(0xFFFF - ih->id));
- tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+ tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len));
id = ntohs(ih->id);
seq = ntohl(th->seq);
@@ -1471,21 +1477,22 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
{
struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int data_len = skb->data_len;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int p_len = sh->gso_size;
gxio_mpipe_edesc_t edesc_head = { { 0 } };
gxio_mpipe_edesc_t edesc_body = { { 0 } };
long f_id = -1; /* id of the current fragment */
- long f_size = -1; /* size of the current fragment */
- long f_used = -1; /* bytes used from the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
+ void *f_data = skb->data;
long n; /* size of the current piece of payload */
unsigned long tx_packets = 0, tx_bytes = 0;
- unsigned int csum_start, sh_len;
+ unsigned int csum_start;
int segment;
/* Prepare to egress the headers: set up header edesc. */
csum_start = skb_checksum_start_offset(skb);
- sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
edesc_head.csum = 1;
edesc_head.csum_start = csum_start;
edesc_head.csum_dest = csum_start + skb->csum_offset;
@@ -1497,7 +1504,6 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
/* Egress all the edescs. */
for (segment = 0; segment < sh->gso_segs; segment++) {
- void *va;
unsigned char *buf;
unsigned int p_used = 0;
@@ -1516,10 +1522,9 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
f_id++;
f_size = sh->frags[f_id].size;
f_used = 0;
+ f_data = tile_net_frag_buf(&sh->frags[f_id]);
}
- va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
-
/* Use bytes from the current fragment. */
n = p_len - p_used;
if (n > f_size - f_used)
@@ -1528,7 +1533,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
p_used += n;
/* Egress a piece of the payload. */
- edesc_body.va = va_to_tile_io_addr(va);
+ edesc_body.va = va_to_tile_io_addr(f_data) + f_used;
edesc_body.xfer_size = n;
edesc_body.bound = !(p_used < p_len);
gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
@@ -1580,7 +1585,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
local_irq_save(irqflags);
/* Try to acquire a completion entry and an egress slot. */
- slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
+ equeue, num_edescs);
if (slot < 0) {
local_irq_restore(irqflags);
return NETDEV_TX_BUSY;
@@ -1674,7 +1680,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
local_irq_save(irqflags);
/* Try to acquire a completion entry and an egress slot. */
- slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
+ equeue, num_edescs);
if (slot < 0) {
local_irq_restore(irqflags);
return NETDEV_TX_BUSY;
@@ -1844,7 +1851,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
memcpy(dev->dev_addr, mac, 6);
dev->addr_len = 6;
} else {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
/* Register the network device. */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6199f6b387b..c1ebfe9efcb 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
out_be32(card->regs + reg, value);
}
-/** spider_net_write_phy - write to phy register
+/**
+ * spider_net_write_phy - write to phy register
* @netdev: adapter to be written to
* @mii_id: id of MII
* @reg: PHY register
@@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id,
spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
}
-/** spider_net_read_phy - read from phy register
+/**
+ * spider_net_read_phy - read from phy register
* @netdev: network device to be read from
* @mii_id: id of MII
* @reg: PHY register
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ea3e0a21ba7..a46c1985968 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -486,7 +486,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
- velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
+ velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
opts->numrx = (opts->numrx & ~3);
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a75e9ef5a4c..a5826a3111a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,7 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3306a20ec21..bdd8891c215 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,7 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1eaf7128afe..f8e35188011 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
#endif
/**
- * * temac_dma_bd_release - Release buffer descriptor rings
+ * temac_dma_bd_release - Release buffer descriptor rings
*/
static void temac_dma_bd_release(struct net_device *ndev)
{
@@ -768,7 +768,6 @@ static void ll_temac_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
skb_put(skb, length);
- skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9c365e192a3..0793299bd39 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -312,7 +312,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,