Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c59x.c | 83
-rw-r--r--  drivers/net/8139cp.c | 2
-rw-r--r--  drivers/net/Kconfig | 20
-rw-r--r--  drivers/net/Makefile | 11
-rw-r--r--  drivers/net/acenic.c | 3
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/au1000_eth.c | 4
-rw-r--r--  drivers/net/bcm63xx_enet.c | 2
-rw-r--r--  drivers/net/benet/be.h | 1
-rw-r--r--  drivers/net/benet/be_cmds.c | 4
-rw-r--r--  drivers/net/benet/be_cmds.h | 5
-rw-r--r--  drivers/net/benet/be_ethtool.c | 2
-rw-r--r--  drivers/net/benet/be_main.c | 29
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 13
-rw-r--r--  drivers/net/can/Makefile | 3
-rw-r--r--  drivers/net/can/at91_can.c | 1186
-rw-r--r--  drivers/net/can/sja1000/ems_pci.c | 16
-rw-r--r--  drivers/net/can/sja1000/sja1000_of_platform.c | 1
-rw-r--r--  drivers/net/can/usb/Makefile | 5
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 1155
-rw-r--r--  drivers/net/cnic.c | 7
-rw-r--r--  drivers/net/cnic_if.h | 4
-rw-r--r--  drivers/net/cpmac.c | 8
-rw-r--r--  drivers/net/cris/eth_v10.c | 20
-rw-r--r--  drivers/net/davinci_emac.c | 45
-rw-r--r--  drivers/net/depca.c | 1
-rw-r--r--  drivers/net/e100.c | 1
-rw-r--r--  drivers/net/e1000/e1000.h | 3
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 202
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 12914
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 3231
-rw-r--r--  drivers/net/e1000/e1000_main.c | 825
-rw-r--r--  drivers/net/e1000/e1000_param.c | 22
-rw-r--r--  drivers/net/e1000e/82571.c | 4
-rw-r--r--  drivers/net/e1000e/netdev.c | 13
-rw-r--r--  drivers/net/ehea/ehea_main.c | 1
-rw-r--r--  drivers/net/eql.c | 1
-rw-r--r--  drivers/net/ethoc.c | 83
-rw-r--r--  drivers/net/ewrk3.c | 1
-rw-r--r--  drivers/net/fec_mpc52xx.c | 6
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 1
-rw-r--r--  drivers/net/forcedeth.c | 1
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 1
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 1
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 1
-rw-r--r--  drivers/net/fsl_pq_mdio.c | 1
-rw-r--r--  drivers/net/gianfar.c | 4
-rw-r--r--  drivers/net/hamachi.c | 1
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 1
-rw-r--r--  drivers/net/hamradio/baycom_ser_fdx.c | 1
-rw-r--r--  drivers/net/hamradio/baycom_ser_hdx.c | 1
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 1
-rw-r--r--  drivers/net/hamradio/mkiss.c | 4
-rw-r--r--  drivers/net/hp100.c | 1
-rw-r--r--  drivers/net/ibm_newemac/core.c | 9
-rw-r--r--  drivers/net/ibm_newemac/emac.h | 1
-rw-r--r--  drivers/net/igb/e1000_mac.c | 72
-rw-r--r--  drivers/net/igb/e1000_mac.h | 1
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 1
-rw-r--r--  drivers/net/igb/igb_main.c | 13
-rw-r--r--  drivers/net/irda/kingsun-sir.c | 1
-rw-r--r--  drivers/net/irda/ks959-sir.c | 1
-rw-r--r--  drivers/net/irda/ksdazzle-sir.c | 1
-rw-r--r--  drivers/net/irda/mcs7780.c | 1
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 47
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 7
-rw-r--r--  drivers/net/irda/toim3232-sir.c | 1
-rw-r--r--  drivers/net/iseries_veth.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c | 232
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 79
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 167
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 11
-rw-r--r--  drivers/net/ixp2000/enp2611.c | 18
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 11
-rw-r--r--  drivers/net/ks8851_mll.c | 1697
-rw-r--r--  drivers/net/meth.c | 2
-rw-r--r--  drivers/net/mlx4/fw.c | 5
-rw-r--r--  drivers/net/mlx4/main.c | 1
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 13
-rw-r--r--  drivers/net/ns83820.c | 1
-rw-r--r--  drivers/net/pasemi_mac_ethtool.c | 3
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 13
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 21
-rw-r--r--  drivers/net/pcnet32.c | 1
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 1
-rw-r--r--  drivers/net/pppol2tp.c | 2
-rw-r--r--  drivers/net/qlge/qlge.h | 36
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c | 2
-rw-r--r--  drivers/net/qlge/qlge_main.c | 139
-rw-r--r--  drivers/net/qlge/qlge_mpi.c | 105
-rw-r--r--  drivers/net/r8169.c | 987
-rw-r--r--  drivers/net/sb1000.c | 1
-rw-r--r--  drivers/net/sfc/efx.c | 3
-rw-r--r--  drivers/net/sgiseeq.c | 2
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/skfp/skfddi.c | 1
-rw-r--r--  drivers/net/skge.c | 17
-rw-r--r--  drivers/net/skge.h | 2
-rw-r--r--  drivers/net/sky2.c | 11
-rw-r--r--  drivers/net/sky2.h | 2
-rw-r--r--  drivers/net/slip.c | 1
-rw-r--r--  drivers/net/stmmac/Kconfig | 53
-rw-r--r--  drivers/net/stmmac/Makefile | 4
-rw-r--r--  drivers/net/stmmac/common.h | 330
-rw-r--r--  drivers/net/stmmac/descs.h | 163
-rw-r--r--  drivers/net/stmmac/gmac.c | 693
-rw-r--r--  drivers/net/stmmac/gmac.h | 204
-rw-r--r--  drivers/net/stmmac/mac100.c | 517
-rw-r--r--  drivers/net/stmmac/mac100.h | 116
-rw-r--r--  drivers/net/stmmac/stmmac.h | 98
-rw-r--r--  drivers/net/stmmac/stmmac_ethtool.c | 395
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 2204
-rw-r--r--  drivers/net/stmmac/stmmac_mdio.c | 217
-rw-r--r--  drivers/net/stmmac/stmmac_timer.c | 140
-rw-r--r--  drivers/net/stmmac/stmmac_timer.h | 41
-rw-r--r--  drivers/net/sungem.c | 1
-rw-r--r--  drivers/net/sunvnet.c | 1
-rw-r--r--  drivers/net/tg3.c | 41
-rw-r--r--  drivers/net/tg3.h | 2
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 1
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/typhoon.c | 1
-rw-r--r--  drivers/net/usb/kaweth.c | 18
-rw-r--r--  drivers/net/usb/pegasus.c | 13
-rw-r--r--  drivers/net/usb/pegasus.h | 6
-rw-r--r--  drivers/net/usb/rndis_host.c | 1
-rw-r--r--  drivers/net/usb/smsc95xx.c | 67
-rw-r--r--  drivers/net/usb/usbnet.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 231
-rw-r--r--  drivers/net/vmxnet3/Makefile | 35
-rw-r--r--  drivers/net/vmxnet3/upt1_defs.h | 96
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_defs.h | 535
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 2565
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 566
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 389
-rw-r--r--  drivers/net/wan/c101.c | 1
-rw-r--r--  drivers/net/wan/cosa.c | 1
-rw-r--r--  drivers/net/wan/cycx_x25.c | 1
-rw-r--r--  drivers/net/wan/dscc4.c | 1
-rw-r--r--  drivers/net/wan/farsync.c | 1
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 18
-rw-r--r--  drivers/net/wan/n2.c | 1
-rw-r--r--  drivers/net/wan/pci200syn.c | 1
-rw-r--r--  drivers/net/wireless/Kconfig | 13
-rw-r--r--  drivers/net/wireless/adm8211.h | 2
-rw-r--r--  drivers/net/wireless/arlan-proc.c | 28
-rw-r--r--  drivers/net/wireless/ath/ar9170/phy.c | 6
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 23
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 202
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 3
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 21
-rw-r--r--  drivers/net/wireless/b43/Makefile | 1
-rw-r--r--  drivers/net/wireless/b43/b43.h | 185
-rw-r--r--  drivers/net/wireless/b43/debugfs.c | 1
-rw-r--r--  drivers/net/wireless/b43/debugfs.h | 1
-rw-r--r--  drivers/net/wireless/b43/dma.c | 4
-rw-r--r--  drivers/net/wireless/b43/leds.c | 266
-rw-r--r--  drivers/net/wireless/b43/leds.h | 33
-rw-r--r--  drivers/net/wireless/b43/main.c | 229
-rw-r--r--  drivers/net/wireless/b43/phy_lp.c | 12
-rw-r--r--  drivers/net/wireless/b43/pio.c | 95
-rw-r--r--  drivers/net/wireless/b43/rfkill.c | 2
-rw-r--r--  drivers/net/wireless/b43/sdio.c | 202
-rw-r--r--  drivers/net/wireless/b43/sdio.h | 45
-rw-r--r--  drivers/net/wireless/b43/xmit.c | 10
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 1
-rw-r--r--  drivers/net/wireless/b43legacy/phy.c | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_info.c | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 1
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 13
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 188
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 188
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 23
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 43
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/cfg80211.c | 1
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/commands.c | 1
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/main.c | 1
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/rx.c | 1
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 1
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 1
-rw-r--r--  drivers/net/wireless/libertas/tx.c | 1
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 3
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 1
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 1
-rw-r--r--  drivers/net/wireless/prism54/islpci_mgt.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00lib.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 1
-rw-r--r--  drivers/net/wireless/wl12xx/Kconfig | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 2
-rw-r--r--  drivers/net/xilinx_emaclite.c | 7
-rw-r--r--  drivers/net/znet.c | 8
216 files changed, 23359 insertions, 12128 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7adff4d0960..975e25b19eb 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -805,58 +805,54 @@ static void poll_vortex(struct net_device *dev)
#ifdef CONFIG_PM
-static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vortex_suspend(struct device *dev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ vortex_down(ndev, 1);
- if (dev && netdev_priv(dev)) {
- if (netif_running(dev)) {
- netif_device_detach(dev);
- vortex_down(dev, 1);
- }
- pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
- free_irq(dev->irq, dev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- }
return 0;
}
-static int vortex_resume(struct pci_dev *pdev)
+static int vortex_resume(struct device *dev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct vortex_private *vp = netdev_priv(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *ndev = pci_get_drvdata(pdev);
int err;
- if (dev && vp) {
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pci_enable_device(pdev);
- if (err) {
- pr_warning("%s: Could not enable device\n",
- dev->name);
- return err;
- }
- pci_set_master(pdev);
- if (request_irq(dev->irq, vp->full_bus_master_rx ?
- &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
- pr_warning("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
- pci_disable_device(pdev);
- return -EBUSY;
- }
- if (netif_running(dev)) {
- err = vortex_up(dev);
- if (err)
- return err;
- else
- netif_device_attach(dev);
- }
- }
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ err = vortex_up(ndev);
+ if (err)
+ return err;
+
+ netif_device_attach(ndev);
+
return 0;
}
-#endif /* CONFIG_PM */
+static struct dev_pm_ops vortex_pm_ops = {
+ .suspend = vortex_suspend,
+ .resume = vortex_resume,
+ .freeze = vortex_suspend,
+ .thaw = vortex_resume,
+ .poweroff = vortex_suspend,
+ .restore = vortex_resume,
+};
+
+#define VORTEX_PM_OPS (&vortex_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define VORTEX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
#ifdef CONFIG_EISA
static struct eisa_device_id vortex_eisa_ids[] = {
@@ -3205,10 +3201,7 @@ static struct pci_driver vortex_driver = {
.probe = vortex_init_one,
.remove = __devexit_p(vortex_remove_one),
.id_table = vortex_pci_tbl,
-#ifdef CONFIG_PM
- .suspend = vortex_suspend,
- .resume = vortex_resume,
-#endif
+ .driver.pm = VORTEX_PM_OPS,
};
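
The hunk above is a straight conversion from the legacy PCI power-management callbacks to the generic dev_pm_ops interface, letting the PCI/PM core handle state saving and power transitions. Below is a minimal sketch of that conversion pattern for a hypothetical "foo" driver; the foo_* names are placeholders and are not part of this patch.

```c
/* Sketch of the legacy-PM to dev_pm_ops conversion pattern (placeholder names). */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	/* Quiesce the interface; the PM core now saves PCI state and
	 * selects the power state, so the driver no longer calls
	 * pci_save_state()/pci_set_power_state() itself. */
	if (ndev && netif_running(ndev))
		netif_device_detach(ndev);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *ndev = pci_get_drvdata(pdev);

	if (ndev && netif_running(ndev))
		netif_device_attach(ndev);
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL
#endif

/* The pci_driver then uses ".driver.pm = FOO_PM_OPS" instead of filling
 * in the legacy .suspend/.resume members. */
```

Note that the 3c59x patch additionally wires .freeze/.thaw/.poweroff/.restore to the same two callbacks, so hibernation follows the same code path as suspend/resume.
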
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 462d9f59c53..83a1922e68e 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -87,7 +87,7 @@
/* These identify the driver base version and may not be removed. */
static char version[] =
-KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ed5741b2e70..e19ca4bb751 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1738,6 +1738,14 @@ config KS8851
help
SPI driver for Micrel KS8851 SPI attached network chip.
+config KS8851_MLL
+ tristate "Micrel KS8851 MLL"
+ depends on HAS_IOMEM
+ select MII
+ help
+ This platform driver is for the Micrel KS8851 address/data bus
+ multiplexed network chip.
+
config VIA_RHINE
tristate "VIA Rhine support"
depends on NET_PCI && PCI
@@ -1875,7 +1883,7 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2475,6 +2483,8 @@ config S6GMAC
To compile this driver as a module, choose M here. The module
will be called s6gmac.
+source "drivers/net/stmmac/Kconfig"
+
endif # NETDEV_1000
#
@@ -3223,4 +3233,12 @@ config VIRTIO_NET
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+config VMXNET3
+ tristate "VMware VMXNET3 ethernet driver"
+ depends on PCI && X86 && INET
+ help
+ This driver supports VMware's vmxnet3 virtual ethernet NIC.
+ To compile this driver as a module, choose M here: the
+ module will be called vmxnet3.
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ae8cd30f13d..246323d7f16 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,6 +2,10 @@
# Makefile for the Linux network (ethercard) device drivers.
#
+obj-$(CONFIG_MII) += mii.o
+obj-$(CONFIG_MDIO) += mdio.o
+obj-$(CONFIG_PHYLIB) += phy/
+
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_E1000) += e1000/
@@ -26,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o
obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_BE2NET) += benet/
+obj-$(CONFIG_VMXNET3) += vmxnet3/
gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
@@ -89,20 +94,18 @@ obj-$(CONFIG_SKY2) += sky2.o
obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_SH_ETH) += sh_eth.o
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
#
# end link order section
#
-obj-$(CONFIG_MII) += mii.o
-obj-$(CONFIG_MDIO) += mdio.o
-obj-$(CONFIG_PHYLIB) += phy/
-
obj-$(CONFIG_SUNDANCE) += sundance.o
obj-$(CONFIG_HAMACHI) += hamachi.o
obj-$(CONFIG_NET) += Space.o loopback.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 5f0b05c2d71..d82a9a99475 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev)
memset(ap->info, 0, sizeof(struct ace_info));
memset(ap->skb, 0, sizeof(struct ace_skb));
- if (ace_load_firmware(dev))
+ ecode = ace_load_firmware(dev);
+ if (ecode)
goto init_error;
ap->fw_running = 0;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index be2c6cfe6e8..1372e9a99f5 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2296,7 +2296,7 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
u32 ctrl;
u32 mac_ctrl_data;
u32 master_ctrl_data;
- u32 wol_ctrl_data;
+ u32 wol_ctrl_data = 0;
u16 mii_bmsr_data;
u16 save_autoneg_advertised;
u16 mii_intr_status_data;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index fdf5937233f..04f63c77071 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
ps->rx_errors++;
if (status & RX_MISSED_FRAME)
ps->rx_missed_errors++;
- if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR))
+ if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
ps->rx_length_errors++;
if (status & RX_CRC_ERROR)
ps->rx_crc_errors++;
@@ -794,8 +794,6 @@ static int au1000_rx(struct net_device *dev)
printk("rx len error\n");
if (status & RX_U_CNTRL_FRAME)
printk("rx u control frame\n");
- if (status & RX_MISSED_FRAME)
- printk("rx miss\n");
}
}
prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 09d270913c5..ba29dc319b3 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -90,7 +90,7 @@ static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
break;
udelay(1);
- } while (limit-- >= 0);
+ } while (limit-- > 0);
return (limit < 0) ? 1 : 0;
}
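
The one-character change above fixes a post-decrement off-by-one: with `while (limit-- >= 0)` the body runs one pass too many and `limit` can already be negative when the status bit is finally seen, so the following `return (limit < 0) ? 1 : 0;` reports a timeout even on success. A small stand-alone illustration of the two loop tests (user-space demo, helper names made up, not kernel code):

```c
#include <stdio.h>

/* Simulate polling a status bit that becomes ready after "ready_after" polls. */
static int poll(int limit, int ready_after, int use_old_test)
{
	int polls = 0;

	do {
		if (++polls == ready_after)
			break;			/* "hardware" signalled completion */
	} while (use_old_test ? (limit-- >= 0) : (limit-- > 0));

	return (limit < 0) ? 1 : 0;		/* 1 = timeout reported */
}

int main(void)
{
	/* The old test allows limit + 2 polls, the new one limit + 1.
	 * With the old test, success on the last allowed poll is misreported
	 * as a timeout because "limit" has already been decremented below 0. */
	printf("old test: %d (wrongly reports timeout)\n", poll(3, 5, 1));
	printf("new test: %d (correctly reports success)\n", poll(3, 4, 0));
	return 0;
}
```
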
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 684c6fe24c8..a80da0e14a5 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -258,6 +258,7 @@ struct be_adapter {
bool link_up;
u32 port_num;
bool promiscuous;
+ u32 cap;
};
extern const struct ethtool_ops be_ethtool_ops;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3dd76c4170b..89876ade5e3 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1068,7 +1068,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
/* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_query_fw_cfg *req;
@@ -1088,6 +1088,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
if (!status) {
struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
*port_num = le32_to_cpu(resp->phys_port);
+ *cap = le32_to_cpu(resp->function_cap);
}
spin_unlock(&adapter->mbox_lock);
@@ -1128,7 +1129,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
- req = embedded_payload(wrb);
sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 93e432f3d92..a86f917f85f 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -62,7 +62,7 @@ enum {
MCC_STATUS_QUEUE_FLUSHING = 0x4,
/* The command is completing with a DMA error */
MCC_STATUS_DMA_FAILED = 0x5,
- MCC_STATUS_NOT_SUPPORTED = 0x66
+ MCC_STATUS_NOT_SUPPORTED = 66
};
#define CQE_STATUS_COMPL_MASK 0xFFFF
@@ -760,7 +760,8 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_adapter *adapter,
u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num);
+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
+ u32 *port_num, u32 *cap);
extern int be_cmd_reset_function(struct be_adapter *adapter);
extern int be_process_mcc(struct be_adapter *adapter);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 11445df3dbc..cda5bf2fc50 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = {
.get_rx_csum = be_get_rx_csum,
.set_rx_csum = be_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 409cf059590..6d5e81f7046 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter)
/* no space available in linux */
dev_stats->tx_dropped = 0;
- dev_stats->multicast = port_stats->tx_multicastframes;
+ dev_stats->multicast = port_stats->rx_multicast_frames;
dev_stats->collisions = 0;
/* detailed tx_errors */
@@ -747,9 +747,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
struct be_eth_rx_compl *rxcp)
{
struct sk_buff *skb;
- u32 vtp, vid;
+ u32 vlanf, vid;
+ u8 vtm;
- vtp = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+ vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+ vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->cap == 0x400) && !vtm)
+ vlanf = 0;
skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
if (!skb) {
@@ -772,7 +779,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->dev = adapter->netdev;
- if (vtp) {
+ if (vlanf) {
if (!adapter->vlan_grp || adapter->num_vlans == 0) {
kfree_skb(skb);
return;
@@ -797,11 +804,18 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
struct be_eq_obj *eq_obj = &adapter->rx_eq;
u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
u16 i, rxq_idx = 0, vid, j;
+ u8 vtm;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
+ vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->cap == 0x400) && !vtm)
+ vlanf = 0;
skb = napi_get_frags(&eq_obj->napi);
if (!skb) {
@@ -1885,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO;
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+ NETIF_F_GRO;
netdev->flags |= IFF_MULTICAST;
@@ -2045,7 +2059,8 @@ static int be_hw_up(struct be_adapter *adapter)
if (status)
return status;
- status = be_cmd_query_fw_cfg(adapter, &adapter->port_num);
+ status = be_cmd_query_fw_cfg(adapter,
+ &adapter->port_num, &adapter->cap);
return status;
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 6044e12ff9f..8762a27a2a1 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/fs.h>
#include <linux/types.h>
@@ -1182,6 +1183,7 @@ static ssize_t bonding_store_primary(struct device *d,
": %s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
bond_select_active_slave(bond);
goto out;
}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 09007437246..df32c109b7a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -75,6 +75,13 @@ config CAN_EMS_PCI
CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
(http://www.ems-wuensche.de).
+config CAN_EMS_USB
+ tristate "EMS CPC-USB/ARM7 CAN/USB interface"
+ depends on USB && CAN_DEV
+ ---help---
+ This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
+ from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+
config CAN_KVASER_PCI
tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
depends on PCI && CAN_SJA1000
@@ -82,6 +89,12 @@ config CAN_KVASER_PCI
This driver is for the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
+config CAN_AT91
+ tristate "Atmel AT91 onchip CAN controller"
+ depends on CAN && CAN_DEV && ARCH_AT91SAM9263
+ ---help---
+ This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
+
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 523a941b358..0dea62721f2 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -7,6 +7,9 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_DEV) += can-dev.o
can-dev-y := dev.o
+obj-y += usb/
+
obj-$(CONFIG_CAN_SJA1000) += sja1000/
+obj-$(CONFIG_CAN_AT91) += at91_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 00000000000..f67ae285a35
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
+/*
+ * at91_can.c - CAN network driver for AT91 SoC CAN controller
+ *
+ * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2 as distributed in the 'COPYING'
+ * file from the main directory of the linux kernel source.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct at91_can_data ek_can_data = {
+ * transceiver_switch = sam9263ek_transceiver_switch,
+ * };
+ *
+ * at91_add_device_can(&ek_can_data);
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <mach/board.h>
+
+#define DRV_NAME "at91_can"
+#define AT91_NAPI_WEIGHT 12
+
+/*
+ * RX/TX Mailbox split
+ * don't dare to touch
+ */
+#define AT91_MB_RX_NUM 12
+#define AT91_MB_TX_SHIFT 2
+
+#define AT91_MB_RX_FIRST 0
+#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
+
+#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
+#define AT91_MB_RX_SPLIT 8
+#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
+#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+
+#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
+#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
+#define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
+
+#define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT)
+#define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT)
+#define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1)
+#define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
+
+/* Common registers */
+enum at91_reg {
+ AT91_MR = 0x000,
+ AT91_IER = 0x004,
+ AT91_IDR = 0x008,
+ AT91_IMR = 0x00C,
+ AT91_SR = 0x010,
+ AT91_BR = 0x014,
+ AT91_TIM = 0x018,
+ AT91_TIMESTP = 0x01C,
+ AT91_ECR = 0x020,
+ AT91_TCR = 0x024,
+ AT91_ACR = 0x028,
+};
+
+/* Mailbox registers (0 <= i <= 15) */
+#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
+#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
+#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
+#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
+#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
+#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
+#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
+#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
+
+/* Register bits */
+#define AT91_MR_CANEN BIT(0)
+#define AT91_MR_LPM BIT(1)
+#define AT91_MR_ABM BIT(2)
+#define AT91_MR_OVL BIT(3)
+#define AT91_MR_TEOF BIT(4)
+#define AT91_MR_TTM BIT(5)
+#define AT91_MR_TIMFRZ BIT(6)
+#define AT91_MR_DRPT BIT(7)
+
+#define AT91_SR_RBSY BIT(29)
+
+#define AT91_MMR_PRIO_SHIFT (16)
+
+#define AT91_MID_MIDE BIT(29)
+
+#define AT91_MSR_MRTR BIT(20)
+#define AT91_MSR_MABT BIT(22)
+#define AT91_MSR_MRDY BIT(23)
+#define AT91_MSR_MMI BIT(24)
+
+#define AT91_MCR_MRTR BIT(20)
+#define AT91_MCR_MTCR BIT(23)
+
+/* Mailbox Modes */
+enum at91_mb_mode {
+ AT91_MB_MODE_DISABLED = 0,
+ AT91_MB_MODE_RX = 1,
+ AT91_MB_MODE_RX_OVRWR = 2,
+ AT91_MB_MODE_TX = 3,
+ AT91_MB_MODE_CONSUMER = 4,
+ AT91_MB_MODE_PRODUCER = 5,
+};
+
+/* Interrupt mask bits */
+#define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \
+ - (1 << AT91_MB_RX_FIRST))
+#define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \
+ - (1 << AT91_MB_TX_FIRST))
+#define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
+
+#define AT91_IRQ_ERRA (1 << 16)
+#define AT91_IRQ_WARN (1 << 17)
+#define AT91_IRQ_ERRP (1 << 18)
+#define AT91_IRQ_BOFF (1 << 19)
+#define AT91_IRQ_SLEEP (1 << 20)
+#define AT91_IRQ_WAKEUP (1 << 21)
+#define AT91_IRQ_TOVF (1 << 22)
+#define AT91_IRQ_TSTP (1 << 23)
+#define AT91_IRQ_CERR (1 << 24)
+#define AT91_IRQ_SERR (1 << 25)
+#define AT91_IRQ_AERR (1 << 26)
+#define AT91_IRQ_FERR (1 << 27)
+#define AT91_IRQ_BERR (1 << 28)
+
+#define AT91_IRQ_ERR_ALL (0x1fff0000)
+#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
+ AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
+#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
+ AT91_IRQ_ERRP | AT91_IRQ_BOFF)
+
+#define AT91_IRQ_ALL (0x1fffffff)
+
+struct at91_priv {
+ struct can_priv can; /* must be the first member! */
+ struct net_device *dev;
+ struct napi_struct napi;
+
+ void __iomem *reg_base;
+
+ u32 reg_sr;
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ unsigned int rx_next;
+
+ struct clk *clk;
+ struct at91_can_data *pdata;
+};
+
+static struct can_bittiming_const at91_bittiming_const = {
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 1,
+};
+
+static inline int get_tx_next_mb(const struct at91_priv *priv)
+{
+ return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline int get_tx_next_prio(const struct at91_priv *priv)
+{
+ return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+}
+
+static inline int get_tx_echo_mb(const struct at91_priv *priv)
+{
+ return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
+{
+ return readl(priv->reg_base + reg);
+}
+
+static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
+ u32 value)
+{
+ writel(value, priv->reg_base + reg);
+}
+
+static inline void set_mb_mode_prio(const struct at91_priv *priv,
+ unsigned int mb, enum at91_mb_mode mode, int prio)
+{
+ at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
+}
+
+static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
+ enum at91_mb_mode mode)
+{
+ set_mb_mode_prio(priv, mb, mode, 0);
+}
+
+static struct sk_buff *alloc_can_skb(struct net_device *dev,
+ struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+
+ return skb;
+}
+
+static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, cf);
+ if (unlikely(!skb))
+ return NULL;
+
+ memset(*cf, 0, sizeof(struct can_frame));
+ (*cf)->can_id = CAN_ERR_FLAG;
+ (*cf)->can_dlc = CAN_ERR_DLC;
+
+ return skb;
+}
+
+/*
+ * Switch transceiver on or off
+ */
+static void at91_transceiver_switch(const struct at91_priv *priv, int on)
+{
+ if (priv->pdata && priv->pdata->transceiver_switch)
+ priv->pdata->transceiver_switch(on);
+}
+
+static void at91_setup_mailboxes(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ /*
+ * The first 12 mailboxes are used as a reception FIFO. The
+ * last mailbox is configured with overwrite option. The
+ * overwrite flag indicates a FIFO overflow.
+ */
+ for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+ set_mb_mode(priv, i, AT91_MB_MODE_RX);
+ set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+
+ /* The last 4 mailboxes are used for transmitting. */
+ for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+ set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
+
+ /* Reset tx and rx helper pointers */
+ priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+}
+
+static int at91_set_bittiming(struct net_device *dev)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 reg_br;
+
+ reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
+ ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+ ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
+ ((bt->phase_seg2 - 1) << 0);
+
+ dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+
+ at91_write(priv, AT91_BR, reg_br);
+
+ return 0;
+}
+
+static void at91_chip_start(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_mr, reg_ier;
+
+ /* disable interrupts */
+ at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+ /* disable chip */
+ reg_mr = at91_read(priv, AT91_MR);
+ at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+ at91_setup_mailboxes(dev);
+ at91_transceiver_switch(priv, 1);
+
+ /* enable chip */
+ at91_write(priv, AT91_MR, AT91_MR_CANEN);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Enable interrupts */
+ reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+ at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+ at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_chip_stop(struct net_device *dev, enum can_state state)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_mr;
+
+ /* disable interrupts */
+ at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+ reg_mr = at91_read(priv, AT91_MR);
+ at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+ at91_transceiver_switch(priv, 0);
+ priv->can.state = state;
+}
+
+/*
+ * theory of operation:
+ *
+ * According to the datasheet priority 0 is the highest priority, 15
+ * is the lowest. If two mailboxes have the same priority level the
+ * message of the mailbox with the lowest number is sent first.
+ *
+ * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
+ * the next mailbox with prio 0, and so on, until all mailboxes are
+ * used. Then we start from the beginning with mailbox
+ * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
+ * prio 1. When we reach the last mailbox with prio 15, we have to
+ * stop sending, waiting for all messages to be delivered, then start
+ * again with mailbox AT91_MB_TX_FIRST prio 0.
+ *
+ * We use the priv->tx_next as counter for the next transmission
+ * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
+ * encode the mailbox number, the upper 4 bits the mailbox priority:
+ *
+ * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) |
+ * (mb - AT91_MB_TX_FIRST);
+ *
+ */
+static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ unsigned int mb, prio;
+ u32 reg_mid, reg_mcr;
+
+ mb = get_tx_next_mb(priv);
+ prio = get_tx_next_prio(priv);
+
+ if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
+ netif_stop_queue(dev);
+
+ dev_err(dev->dev.parent,
+ "BUG! TX buffer full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+ else
+ reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
+
+ reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
+ (cf->can_dlc << 16) | AT91_MCR_MTCR;
+
+ /* disable MB while writing ID (see datasheet) */
+ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
+ at91_write(priv, AT91_MID(mb), reg_mid);
+ set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
+
+ at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
+ at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
+
+ /* This triggers transmission */
+ at91_write(priv, AT91_MCR(mb), reg_mcr);
+
+ stats->tx_bytes += cf->can_dlc;
+ dev->trans_start = jiffies;
+
+ /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+ can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+
+ /*
+ * we have to stop the queue and deliver all messages in case
+ * of a prio+mb counter wrap around. This is the case if the
+ * tx_next buffer's prio and mailbox are both 0.
+ *
+ * also stop the queue if next buffer is still in use
+ * (== not ready)
+ */
+ priv->tx_next++;
+ if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
+ AT91_MSR_MRDY) ||
+ (priv->tx_next & AT91_NEXT_MASK) == 0)
+ netif_stop_queue(dev);
+
+ /* Enable interrupt for this mailbox */
+ at91_write(priv, AT91_IER, 1 << mb);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * at91_activate_rx_low - activate lower rx mailboxes
+ * @priv: at91 context
+ *
+ * Reenables the lower mailboxes for reception of new CAN messages
+ */
+static inline void at91_activate_rx_low(const struct at91_priv *priv)
+{
+ u32 mask = AT91_MB_RX_LOW_MASK;
+ at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_activate_rx_mb - reactivate a single rx mailbox
+ * @priv: at91 context
+ * @mb: mailbox to reactivate
+ *
+ * Reenables given mailbox for reception of new CAN messages
+ */
+static inline void at91_activate_rx_mb(const struct at91_priv *priv,
+ unsigned int mb)
+{
+ u32 mask = 1 << mb;
+ at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_rx_overflow_err - send error frame due to rx overflow
+ * @dev: net device
+ */
+static void at91_rx_overflow_err(struct net_device *dev)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
+ * @dev: net device
+ * @mb: mailbox number to read from
+ * @cf: can frame where to store message
+ *
+ * Reads a CAN message from the given mailbox and stores data into
+ * given can frame. "mb" and "cf" must be valid.
+ */
+static void at91_read_mb(struct net_device *dev, unsigned int mb,
+ struct can_frame *cf)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_msr, reg_mid;
+
+ reg_mid = at91_read(priv, AT91_MID(mb));
+ if (reg_mid & AT91_MID_MIDE)
+ cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
+
+ reg_msr = at91_read(priv, AT91_MSR(mb));
+ if (reg_msr & AT91_MSR_MRTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+
+ *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+ *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+
+ if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+ at91_rx_overflow_err(dev);
+}
+
+/**
+ * at91_read_msg - read CAN message from mailbox
+ * @dev: net device
+ * @mb: mail box to read from
+ *
+ * Reads a CAN message from the given mailbox, puts it into the Linux
+ * network RX queue and does all housekeeping chores (stats, ...)
+ */
+static void at91_read_msg(struct net_device *dev, unsigned int mb)
+{
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ at91_read_mb(dev, mb, cf);
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_poll_rx - read multiple CAN messages from mailboxes
+ * @dev: net device
+ * @quota: max number of packets we're allowed to receive
+ *
+ * Theory of Operation:
+ *
+ * 12 of the 16 mailboxes on the chip are reserved for RX. we split
+ * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ *
+ * Like it or not, the chip always saves a received CAN message
+ * into the first free mailbox it finds (starting with the
+ * lowest). This makes it very difficult to read the messages in the
+ * right order from the chip. This is how we work around that problem:
+ *
+ * The first message goes into mb nr. 0 and issues an interrupt. All
+ * rx ints are disabled in the interrupt handler and a napi poll is
+ * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * receive another message).
+ *
+ * lower mbxs upper
+ * ______^______ __^__
+ * / \ / \
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ * |x|x|x|x|x|x|x|x|| | | | |
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
+ * 0 1 2 3 4 5 6 7 8 9 0 1 / box
+ *
+ * The variable priv->rx_next points to the next mailbox to read a
+ * message from. As long as we're in the lower mailboxes we just read
+ * each mailbox but do not reenable it.
+ *
+ * With completion of the last of the lower mailboxes, we reenable the
+ * whole first group, but continue to look for filled mailboxes in the
+ * upper group. Think of the second group as overflow mailboxes, which
+ * take CAN messages if the lower group is full. While in the upper
+ * group we reenable each mailbox right after reading it, giving the
+ * chip more room to store messages.
+ *
+ * After finishing, we look again in the lower group if we still have
+ * quota left.
+ *
+ */
+static int at91_poll_rx(struct net_device *dev, int quota)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_sr = at91_read(priv, AT91_SR);
+ const unsigned long *addr = (unsigned long *)&reg_sr;
+ unsigned int mb;
+ int received = 0;
+
+ if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+ reg_sr & AT91_MB_RX_LOW_MASK)
+ dev_info(dev->dev.parent,
+ "order of incoming frames cannot be guaranteed\n");
+
+ again:
+ for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
+ mb < AT91_MB_RX_NUM && quota > 0;
+ reg_sr = at91_read(priv, AT91_SR),
+ mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+ at91_read_msg(dev, mb);
+
+ /* reactivate mailboxes */
+ if (mb == AT91_MB_RX_LOW_LAST)
+ /* all lower mailboxes, if we just finished the last one */
+ at91_activate_rx_low(priv);
+ else if (mb > AT91_MB_RX_LOW_LAST)
+ /* only the mailbox we read */
+ at91_activate_rx_mb(priv, mb);
+
+ received++;
+ quota--;
+ }
+
+ /* upper group completed, look again in lower */
+ if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+ quota > 0 && mb >= AT91_MB_RX_NUM) {
+ priv->rx_next = 0;
+ goto again;
+ }
+
+ return received;
+}
+
+static void at91_poll_err_frame(struct net_device *dev,
+ struct can_frame *cf, u32 reg_sr)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+
+ /* CRC error */
+ if (reg_sr & AT91_IRQ_CERR) {
+ dev_dbg(dev->dev.parent, "CERR irq\n");
+ dev->stats.rx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ }
+
+ /* Stuffing Error */
+ if (reg_sr & AT91_IRQ_SERR) {
+ dev_dbg(dev->dev.parent, "SERR irq\n");
+ dev->stats.rx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ /* Acknowledgement Error */
+ if (reg_sr & AT91_IRQ_AERR) {
+ dev_dbg(dev->dev.parent, "AERR irq\n");
+ dev->stats.tx_errors++;
+ cf->can_id |= CAN_ERR_ACK;
+ }
+
+ /* Form error */
+ if (reg_sr & AT91_IRQ_FERR) {
+ dev_dbg(dev->dev.parent, "FERR irq\n");
+ dev->stats.rx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+
+ /* Bit Error */
+ if (reg_sr & AT91_IRQ_BERR) {
+ dev_dbg(dev->dev.parent, "BERR irq\n");
+ dev->stats.tx_errors++;
+ priv->can.can_stats.bus_error++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ }
+}
+
+static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ if (quota == 0)
+ return 0;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ at91_poll_err_frame(dev, cf, reg_sr);
+ netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int at91_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *dev = napi->dev;
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_sr = at91_read(priv, AT91_SR);
+ int work_done = 0;
+
+ if (reg_sr & AT91_IRQ_MB_RX)
+ work_done += at91_poll_rx(dev, quota - work_done);
+
+ /*
+ * The error bits are clear on read,
+ * so use saved value from irq handler.
+ */
+ reg_sr |= priv->reg_sr;
+ if (reg_sr & AT91_IRQ_ERR_FRAME)
+ work_done += at91_poll_err(dev, quota - work_done, reg_sr);
+
+ if (work_done < quota) {
+ /* enable IRQs for frame errors and all mailboxes >= rx_next */
+ u32 reg_ier = AT91_IRQ_ERR_FRAME;
+ reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
+
+ napi_complete(napi);
+ at91_write(priv, AT91_IER, reg_ier);
+ }
+
+ return work_done;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next and check if the
+ * packet has been transmitted, echo it back to the CAN framework. If
+ * we discover a not yet transmitted package, stop looking for more.
+ *
+ */
+static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_msr;
+ unsigned int mb;
+
+ /* masking of reg_sr not needed, already done by at91_irq */
+
+ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ mb = get_tx_echo_mb(priv);
+
+ /* no event in mailbox? */
+ if (!(reg_sr & (1 << mb)))
+ break;
+
+ /* Disable irq for this TX mailbox */
+ at91_write(priv, AT91_IDR, 1 << mb);
+
+ /*
+ * only echo if the mailbox signals us a transfer
+ * complete (MSR_MRDY). Otherwise it's a transfer
+ * abort. "can_bus_off()" takes care of the skbs
+ * parked in the echo queue.
+ */
+ reg_msr = at91_read(priv, AT91_MSR(mb));
+ if (likely(reg_msr & AT91_MSR_MRDY &&
+ ~reg_msr & AT91_MSR_MABT)) {
+ /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+ can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+ dev->stats.tx_packets++;
+ }
+ }
+
+ /*
+ * restart queue if we don't have a wrap around but restart if
+ * we get a TX int for the last can frame directly before a
+ * wrap around.
+ */
+ if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
+ (priv->tx_echo & AT91_NEXT_MASK) == 0)
+ netif_wake_queue(dev);
+}
+
+static void at91_irq_err_state(struct net_device *dev,
+ struct can_frame *cf, enum can_state new_state)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_idr, reg_ier, reg_ecr;
+ u8 tec, rec;
+
+ reg_ecr = at91_read(priv, AT91_ECR);
+ rec = reg_ecr & 0xff;
+ tec = reg_ecr >> 16;
+
+ switch (priv->can.state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ /*
+ * from: ERROR_ACTIVE
+ * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+ * => : there was a warning int
+ */
+ if (new_state >= CAN_STATE_ERROR_WARNING &&
+ new_state <= CAN_STATE_BUS_OFF) {
+ dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ priv->can.can_stats.error_warning++;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (tec > rec) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ }
+ case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ /*
+ * from: ERROR_ACTIVE, ERROR_WARNING
+ * to : ERROR_PASSIVE, BUS_OFF
+ * => : error passive int
+ */
+ if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+ new_state <= CAN_STATE_BUS_OFF) {
+ dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ priv->can.can_stats.error_passive++;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (tec > rec) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ break;
+ case CAN_STATE_BUS_OFF:
+ /*
+ * from: BUS_OFF
+ * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
+ */
+ if (new_state <= CAN_STATE_ERROR_PASSIVE) {
+ cf->can_id |= CAN_ERR_RESTARTED;
+
+ dev_dbg(dev->dev.parent, "restarted\n");
+ priv->can.can_stats.restarts++;
+
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+
+ /* process state changes depending on the new state */
+ switch (new_state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ /*
+ * actually we want to enable AT91_IRQ_WARN here, but
+ * it screws up the system under certain
+ * circumstances. so just enable AT91_IRQ_ERRP, thus
+ * the "fallthrough"
+ */
+ dev_dbg(dev->dev.parent, "Error Active\n");
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
+ reg_ier = AT91_IRQ_ERRP;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
+ reg_ier = AT91_IRQ_BOFF;
+ break;
+ case CAN_STATE_BUS_OFF:
+ reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
+ AT91_IRQ_WARN | AT91_IRQ_BOFF;
+ reg_ier = 0;
+
+ cf->can_id |= CAN_ERR_BUSOFF;
+
+ dev_dbg(dev->dev.parent, "bus-off\n");
+ netif_carrier_off(dev);
+ priv->can.can_stats.bus_off++;
+
+ /* turn off chip, if restart is disabled */
+ if (!priv->can.restart_ms) {
+ at91_chip_stop(dev, CAN_STATE_BUS_OFF);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ at91_write(priv, AT91_IDR, reg_idr);
+ at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_irq_err(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ enum can_state new_state;
+ u32 reg_sr;
+
+ reg_sr = at91_read(priv, AT91_SR);
+
+ /* we need to look at the unmasked reg_sr */
+ if (unlikely(reg_sr & AT91_IRQ_BOFF))
+ new_state = CAN_STATE_BUS_OFF;
+ else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (unlikely(reg_sr & AT91_IRQ_WARN))
+ new_state = CAN_STATE_ERROR_WARNING;
+ else if (likely(reg_sr & AT91_IRQ_ERRA))
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ else {
+ dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+ return;
+ }
+
+ /* state hasn't changed */
+ if (likely(new_state == priv->can.state))
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return;
+
+ at91_irq_err_state(dev, cf, new_state);
+ netif_rx(skb);
+
+ dev->last_rx = jiffies;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += cf->can_dlc;
+
+ priv->can.state = new_state;
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t at91_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct at91_priv *priv = netdev_priv(dev);
+ irqreturn_t handled = IRQ_NONE;
+ u32 reg_sr, reg_imr;
+
+ reg_sr = at91_read(priv, AT91_SR);
+ reg_imr = at91_read(priv, AT91_IMR);
+
+ /* Ignore masked interrupts */
+ reg_sr &= reg_imr;
+ if (!reg_sr)
+ goto exit;
+
+ handled = IRQ_HANDLED;
+
+ /* Receive or error interrupt? -> napi */
+ if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+ /*
+ * The error bits are clear on read,
+ * save for later use.
+ */
+ priv->reg_sr = reg_sr;
+ at91_write(priv, AT91_IDR,
+ AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+ napi_schedule(&priv->napi);
+ }
+
+ /* Transmission complete interrupt */
+ if (reg_sr & AT91_IRQ_MB_TX)
+ at91_irq_tx(dev, reg_sr);
+
+ at91_irq_err(dev);
+
+ exit:
+ return handled;
+}
+
+static int at91_open(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+ int err;
+
+ clk_enable(priv->clk);
+
+ /* check or determine and set bittime */
+ err = open_candev(dev);
+ if (err)
+ goto out;
+
+ /* register interrupt handler */
+ if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
+ dev->name, dev)) {
+ err = -EAGAIN;
+ goto out_close;
+ }
+
+ /* start chip and queuing */
+ at91_chip_start(dev);
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+ out_close:
+ close_candev(dev);
+ out:
+ clk_disable(priv->clk);
+
+ return err;
+}
+
+/*
+ * stop CAN bus activity
+ */
+static int at91_close(struct net_device *dev)
+{
+ struct at91_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+ at91_chip_stop(dev, CAN_STATE_STOPPED);
+
+ free_irq(dev->irq, dev);
+ clk_disable(priv->clk);
+
+ close_candev(dev);
+
+ return 0;
+}
+
+static int at91_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ at91_chip_start(dev);
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops at91_netdev_ops = {
+ .ndo_open = at91_open,
+ .ndo_stop = at91_close,
+ .ndo_start_xmit = at91_start_xmit,
+};
+
+static int __init at91_can_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct at91_priv *priv;
+ struct resource *res;
+ struct clk *clk;
+ void __iomem *addr;
+ int err, irq;
+
+ clk = clk_get(&pdev->dev, "can_clk");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || !irq) {
+ err = -ENODEV;
+ goto exit_put;
+ }
+
+ if (!request_mem_region(res->start,
+ resource_size(res),
+ pdev->name)) {
+ err = -EBUSY;
+ goto exit_put;
+ }
+
+ addr = ioremap_nocache(res->start, resource_size(res));
+ if (!addr) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+
+ dev = alloc_candev(sizeof(struct at91_priv));
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_iounmap;
+ }
+
+ dev->netdev_ops = &at91_netdev_ops;
+ dev->irq = irq;
+ dev->flags |= IFF_ECHO;
+
+ priv = netdev_priv(dev);
+ priv->can.clock.freq = clk_get_rate(clk);
+ priv->can.bittiming_const = &at91_bittiming_const;
+ priv->can.do_set_bittiming = at91_set_bittiming;
+ priv->can.do_set_mode = at91_set_mode;
+ priv->reg_base = addr;
+ priv->dev = dev;
+ priv->clk = clk;
+ priv->pdata = pdev->dev.platform_data;
+
+ netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_candev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "registering netdev failed\n");
+ goto exit_free;
+ }
+
+ dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+ priv->reg_base, dev->irq);
+
+ return 0;
+
+ exit_free:
+ free_netdev(dev);
+ exit_iounmap:
+ iounmap(addr);
+ exit_release:
+ release_mem_region(res->start, resource_size(res));
+ exit_put:
+ clk_put(clk);
+ exit:
+ return err;
+}
+
+static int __devexit at91_can_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct at91_priv *priv = netdev_priv(dev);
+ struct resource *res;
+
+ unregister_netdev(dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ free_netdev(dev);
+
+ iounmap(priv->reg_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ clk_put(priv->clk);
+
+ return 0;
+}
+
+static struct platform_driver at91_can_driver = {
+ .probe = at91_can_probe,
+ .remove = __devexit_p(at91_can_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at91_can_module_init(void)
+{
+ printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
+ return platform_driver_register(&at91_can_driver);
+}
+
+static void __exit at91_can_module_exit(void)
+{
+ platform_driver_unregister(&at91_can_driver);
+ printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
+}
+
+module_init(at91_can_module_init);
+module_exit(at91_can_module_exit);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
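
The "theory of operation" comment in at91_start_xmit above packs the next TX mailbox and its priority into the single priv->tx_next counter. The stand-alone sketch below is illustrative only (it reuses the driver's constants in a user-space demo) and shows how the counter decodes as frames are queued:

```c
#include <stdio.h>

/* Constants as defined in at91_can.c above. */
#define AT91_MB_TX_SHIFT	2
#define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)	/* 4 TX mailboxes */
#define AT91_MB_TX_FIRST	12			/* mailboxes 0..11 are RX */
#define AT91_NEXT_PRIO_SHIFT	AT91_MB_TX_SHIFT
#define AT91_NEXT_MB_MASK	(AT91_MB_TX_NUM - 1)

int main(void)
{
	unsigned int tx_next;

	for (tx_next = 0; tx_next < 3 * AT91_MB_TX_NUM; tx_next++) {
		unsigned int mb   = (tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
		unsigned int prio = (tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;

		/* The four TX mailboxes (12..15) are cycled through with an
		 * increasing priority, exactly as described in the comment. */
		printf("tx_next=%2u -> mailbox %u, prio %u\n", tx_next, mb, prio);
	}
	return 0;
}
```

When both the priority and the mailbox index wrap back to 0, the driver stops the queue until all echo skbs have returned, which is what the AT91_NEXT_MASK test at the end of at91_start_xmit checks.
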
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 7d84b8ac9c1..fd04789d337 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -94,12 +94,14 @@ struct ems_pci_card {
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_MEM_SIZE 4096
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_MEM_SIZE 128
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+
static struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -224,7 +226,7 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, mem_size, base_bar;
+ int max_chan, conf_size, base_bar;
int err, i;
/* Enabling PCI device */
@@ -251,22 +253,22 @@ static int __devinit ems_pci_add_card(struct pci_dev *pdev,
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
- mem_size = EMS_PCI_V2_MEM_SIZE;
+ conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
- mem_size = EMS_PCI_V1_MEM_SIZE;
+ conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, mem_size);
+ card->conf_addr = pci_iomap(pdev, 0, conf_size);
if (card->conf_addr == NULL) {
err = -ENOMEM;
goto failure_cleanup;
}
- card->base_addr = pci_iomap(pdev, base_bar, mem_size);
+ card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
if (card->base_addr == NULL) {
err = -ENOMEM;
goto failure_cleanup;
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 3373560405b..9dd076a626a 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
{.compatible = "nxp,sja1000"},
{},
};
+MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
static struct of_platform_driver sja1000_ofp_driver = {
.owner = THIS_MODULE,
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
new file mode 100644
index 00000000000..c3f75ba701b
--- /dev/null
+++ b/drivers/net/can/usb/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Linux Controller Area Network USB drivers.
+#
+
+obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
new file mode 100644
index 00000000000..9012e0abc62
--- /dev/null
+++ b/drivers/net/can/usb/ems_usb.c
@@ -0,0 +1,1155 @@
+/*
+ * CAN driver for EMS Dr. Thomas Wuensche CPC-USB/ARM7
+ *
+ * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+MODULE_AUTHOR("Sebastian Haas <haas@ems-wuensche.com>");
+MODULE_DESCRIPTION("CAN driver for EMS Dr. Thomas Wuensche CAN/USB interfaces");
+MODULE_LICENSE("GPL v2");
+
+/* Control-Values for CPC_Control() Command Subject Selection */
+#define CONTR_CAN_MESSAGE 0x04
+#define CONTR_CAN_STATE 0x0C
+#define CONTR_BUS_ERROR 0x1C
+
+/* Control Command Actions */
+#define CONTR_CONT_OFF 0
+#define CONTR_CONT_ON 1
+#define CONTR_ONCE 2
+
+/* Messages from CPC to PC */
+#define CPC_MSG_TYPE_CAN_FRAME 1 /* CAN data frame */
+#define CPC_MSG_TYPE_RTR_FRAME 8 /* CAN remote frame */
+#define CPC_MSG_TYPE_CAN_PARAMS 12 /* Actual CAN parameters */
+#define CPC_MSG_TYPE_CAN_STATE 14 /* CAN state message */
+#define CPC_MSG_TYPE_EXT_CAN_FRAME 16 /* Extended CAN data frame */
+#define CPC_MSG_TYPE_EXT_RTR_FRAME 17 /* Extended remote frame */
+#define CPC_MSG_TYPE_CONTROL 19 /* change interface behavior */
+#define CPC_MSG_TYPE_CONFIRM 20 /* command processed confirmation */
+#define CPC_MSG_TYPE_OVERRUN 21 /* overrun events */
+#define CPC_MSG_TYPE_CAN_FRAME_ERROR 23 /* detected bus errors */
+#define CPC_MSG_TYPE_ERR_COUNTER 25 /* RX/TX error counter */
+
+/* Messages from the PC to the CPC interface */
+#define CPC_CMD_TYPE_CAN_FRAME 1 /* CAN data frame */
+#define CPC_CMD_TYPE_CONTROL 3 /* control of interface behavior */
+#define CPC_CMD_TYPE_CAN_PARAMS 6 /* set CAN parameters */
+#define CPC_CMD_TYPE_RTR_FRAME 13 /* CAN remote frame */
+#define CPC_CMD_TYPE_CAN_STATE 14 /* CAN state message */
+#define CPC_CMD_TYPE_EXT_CAN_FRAME 15 /* Extended CAN data frame */
+#define CPC_CMD_TYPE_EXT_RTR_FRAME 16 /* Extended CAN remote frame */
+#define CPC_CMD_TYPE_CAN_EXIT 200 /* exit the CAN */
+
+#define CPC_CMD_TYPE_INQ_ERR_COUNTER 25 /* request the CAN error counters */
+#define CPC_CMD_TYPE_CLEAR_MSG_QUEUE 8 /* clear CPC_MSG queue */
+#define CPC_CMD_TYPE_CLEAR_CMD_QUEUE 28 /* clear CPC_CMD queue */
+
+#define CPC_CC_TYPE_SJA1000 2 /* Philips basic CAN controller */
+
+#define CPC_CAN_ECODE_ERRFRAME 0x01 /* Ecode type */
+
+/* Overrun types */
+#define CPC_OVR_EVENT_CAN 0x01
+#define CPC_OVR_EVENT_CANSTATE 0x02
+#define CPC_OVR_EVENT_BUSERROR 0x04
+
+/*
+ * If the CAN controller lost a message we indicate it with the highest bit
+ * set in the count field.
+ */
+#define CPC_OVR_HW 0x80
+
+/* Size of the "struct ems_cpc_msg" without the union */
+#define CPC_MSG_HEADER_LEN 11
+#define CPC_CAN_MSG_MIN_SIZE 5
+
+/* Define these values to match your devices */
+#define USB_CPCUSB_VENDOR_ID 0x12D6
+
+#define USB_CPCUSB_ARM7_PRODUCT_ID 0x0444
+
+/* Mode register NXP LPC2119/SJA1000 CAN Controller */
+#define SJA1000_MOD_NORMAL 0x00
+#define SJA1000_MOD_RM 0x01
+
+/* ECC register NXP LPC2119/SJA1000 CAN Controller */
+#define SJA1000_ECC_SEG 0x1F
+#define SJA1000_ECC_DIR 0x20
+#define SJA1000_ECC_ERR 0x06
+#define SJA1000_ECC_BIT 0x00
+#define SJA1000_ECC_FORM 0x40
+#define SJA1000_ECC_STUFF 0x80
+#define SJA1000_ECC_MASK 0xc0
+
+/* Status register content */
+#define SJA1000_SR_BS 0x80
+#define SJA1000_SR_ES 0x40
+
+#define SJA1000_DEFAULT_OUTPUT_CONTROL 0xDA
+
+/*
+ * The device actually uses a 16MHz clock to generate the CAN clock
+ * but it expects SJA1000 bit-timing settings based on 8MHz (the
+ * conversion is done internally).
+ */
+#define EMS_USB_ARM7_CLOCK 8000000
+
+/*
+ * CAN-Message representation in a CPC_MSG. Message object type is
+ * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+ * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME.
+ */
+struct cpc_can_msg {
+ u32 id;
+ u8 length;
+ u8 msg[8];
+};
+
+/* Representation of the CAN parameters for the SJA1000 controller */
+struct cpc_sja1000_params {
+ u8 mode;
+ u8 acc_code0;
+ u8 acc_code1;
+ u8 acc_code2;
+ u8 acc_code3;
+ u8 acc_mask0;
+ u8 acc_mask1;
+ u8 acc_mask2;
+ u8 acc_mask3;
+ u8 btr0;
+ u8 btr1;
+ u8 outp_contr;
+};
+
+/* CAN params message representation */
+struct cpc_can_params {
+ u8 cc_type;
+
+ /* Will support M16C CAN controller in the future */
+ union {
+ struct cpc_sja1000_params sja1000;
+ } cc_params;
+};
+
+/* Structure for confirmed message handling */
+struct cpc_confirm {
+ u8 error; /* error code */
+};
+
+/* Structure for overrun conditions */
+struct cpc_overrun {
+ u8 event;
+ u8 count;
+};
+
+/* SJA1000 CAN errors (compatible to NXP LPC2119) */
+struct cpc_sja1000_can_error {
+ u8 ecc;
+ u8 rxerr;
+ u8 txerr;
+};
+
+/* structure for CAN error conditions */
+struct cpc_can_error {
+ u8 ecode;
+
+ struct {
+ u8 cc_type;
+
+ /* Other controllers may also provide error code capture regs */
+ union {
+ struct cpc_sja1000_can_error sja1000;
+ } regs;
+ } cc;
+};
+
+/*
+ * Structure containing the RX/TX error counters. It is used to request
+ * the values of the CAN controller's TX and RX error counters.
+ */
+struct cpc_can_err_counter {
+ u8 rx;
+ u8 tx;
+};
+
+/* Main message type used between library and application */
+struct __attribute__ ((packed)) ems_cpc_msg {
+ u8 type; /* type of message */
+ u8 length; /* length of data within union 'msg' */
+ u8 msgid; /* confirmation handle */
+ u32 ts_sec; /* timestamp in seconds */
+ u32 ts_nsec; /* timestamp in nano seconds */
+
+ union {
+ u8 generic[64];
+ struct cpc_can_msg can_msg;
+ struct cpc_can_params can_params;
+ struct cpc_confirm confirmation;
+ struct cpc_overrun overrun;
+ struct cpc_can_error error;
+ struct cpc_can_err_counter err_counter;
+ u8 can_state;
+ } msg;
+};
+
+/*
+ * Table of devices that work with this driver
+ * NOTE: So far this driver supports only the CPC-USB/ARM7 (LPC2119).
+ */
+static struct usb_device_id ems_usb_table[] = {
+ {USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_ARM7_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ems_usb_table);
+
+#define RX_BUFFER_SIZE 64
+#define CPC_HEADER_SIZE 4
+#define INTR_IN_BUFFER_SIZE 4
+
+#define MAX_RX_URBS 10
+#define MAX_TX_URBS CAN_ECHO_SKB_MAX
+
+struct ems_usb;
+
+struct ems_tx_urb_context {
+ struct ems_usb *dev;
+
+ u32 echo_index;
+ u8 dlc;
+};
+
+struct ems_usb {
+ struct can_priv can; /* must be the first member */
+ int open_time;
+
+ struct sk_buff *echo_skb[MAX_TX_URBS];
+
+ struct usb_device *udev;
+ struct net_device *netdev;
+
+ atomic_t active_tx_urbs;
+ struct usb_anchor tx_submitted;
+ struct ems_tx_urb_context tx_contexts[MAX_TX_URBS];
+
+ struct usb_anchor rx_submitted;
+
+ struct urb *intr_urb;
+
+ u8 *tx_msg_buffer;
+
+ u8 *intr_in_buffer;
+ unsigned int free_slots; /* remember number of available slots */
+
+ struct ems_cpc_msg active_params; /* active controller parameters */
+};
+
+static void ems_usb_read_interrupt_callback(struct urb *urb)
+{
+ struct ems_usb *dev = urb->context;
+ struct net_device *netdev = dev->netdev;
+ int err;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0:
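+ /* byte 1 of the interrupt message holds the device's free buffer
+ * slot count, used to throttle the TX path */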
+ dev->free_slots = dev->intr_in_buffer[1];
+ break;
+
+ case -ECONNRESET: /* unlink */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+
+ default:
+ dev_info(netdev->dev.parent, "Rx interrupt aborted %d\n",
+ urb->status);
+ break;
+ }
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else if (err)
+ dev_err(netdev->dev.parent,
+ "failed resubmitting intr urb: %d\n", err);
+
+ return;
+}
+
+static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ int i;
+ struct net_device_stats *stats = &dev->netdev->stats;
+
+ skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+ if (skb == NULL)
+ return;
+
+ skb->protocol = htons(ETH_P_CAN);
+
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+
+ cf->can_id = msg->msg.can_msg.id;
+ cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
+
+ if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME
+ || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ if (msg->type == CPC_MSG_TYPE_RTR_FRAME
+ || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = msg->msg.can_msg.msg[i];
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &dev->netdev->stats;
+
+ skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame));
+ if (skb == NULL)
+ return;
+
+ skb->protocol = htons(ETH_P_CAN);
+
+ cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ memset(cf, 0, sizeof(struct can_frame));
+
+ cf->can_id = CAN_ERR_FLAG;
+ cf->can_dlc = CAN_ERR_DLC;
+
+ if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
+ u8 state = msg->msg.can_state;
+
+ if (state & SJA1000_SR_BS) {
+ dev->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+
+ can_bus_off(dev->netdev);
+ } else if (state & SJA1000_SR_ES) {
+ dev->can.state = CAN_STATE_ERROR_WARNING;
+ dev->can.can_stats.error_warning++;
+ } else {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dev->can.can_stats.error_passive++;
+ }
+ } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) {
+ u8 ecc = msg->msg.error.cc.regs.sja1000.ecc;
+ u8 txerr = msg->msg.error.cc.regs.sja1000.txerr;
+ u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr;
+
+ /* bus error interrupt */
+ dev->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (ecc & SJA1000_ECC_MASK) {
+ case SJA1000_ECC_BIT:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case SJA1000_ECC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case SJA1000_ECC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
+ break;
+ }
+
+ /* Error occurred during transmission? */
+ if ((ecc & SJA1000_ECC_DIR) == 0)
+ cf->data[2] |= CAN_ERR_PROT_TX;
+
+ if (dev->can.state == CAN_STATE_ERROR_WARNING ||
+ dev->can.state == CAN_STATE_ERROR_PASSIVE) {
+ cf->data[1] = (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ } else if (msg->type == CPC_MSG_TYPE_OVERRUN) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+/*
+ * callback for bulk IN urb
+ */
+static void ems_usb_read_bulk_callback(struct urb *urb)
+{
+ struct ems_usb *dev = urb->context;
+ struct net_device *netdev;
+ int retval;
+
+ netdev = dev->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+
+ case -ENOENT:
+ return;
+
+ default:
+ dev_info(netdev->dev.parent, "Rx URB aborted (%d)\n",
+ urb->status);
+ goto resubmit_urb;
+ }
+
+ if (urb->actual_length > CPC_HEADER_SIZE) {
+ struct ems_cpc_msg *msg;
+ u8 *ibuf = urb->transfer_buffer;
+ u8 msg_count, again, start;
+
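+ /* ibuf[0]: bits 0-6 give the number of CPC messages in this
+ * buffer, bit 7 is a continuation flag (kept in 'again' but
+ * not evaluated here) */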
+ msg_count = ibuf[0] & ~0x80;
+ again = ibuf[0] & 0x80;
+
+ start = CPC_HEADER_SIZE;
+
+ while (msg_count) {
+ msg = (struct ems_cpc_msg *)&ibuf[start];
+
+ switch (msg->type) {
+ case CPC_MSG_TYPE_CAN_STATE:
+ /* Process CAN state changes */
+ ems_usb_rx_err(dev, msg);
+ break;
+
+ case CPC_MSG_TYPE_CAN_FRAME:
+ case CPC_MSG_TYPE_EXT_CAN_FRAME:
+ case CPC_MSG_TYPE_RTR_FRAME:
+ case CPC_MSG_TYPE_EXT_RTR_FRAME:
+ ems_usb_rx_can_msg(dev, msg);
+ break;
+
+ case CPC_MSG_TYPE_CAN_FRAME_ERROR:
+ /* Process errorframe */
+ ems_usb_rx_err(dev, msg);
+ break;
+
+ case CPC_MSG_TYPE_OVERRUN:
+ /* Message lost while receiving */
+ ems_usb_rx_err(dev, msg);
+ break;
+ }
+
+ start += CPC_MSG_HEADER_LEN + msg->length;
+ msg_count--;
+
+ if (start > urb->transfer_buffer_length) {
+ dev_err(netdev->dev.parent, "format error\n");
+ break;
+ }
+ }
+ }
+
+resubmit_urb:
+ usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
+ urb->transfer_buffer, RX_BUFFER_SIZE,
+ ems_usb_read_bulk_callback, dev);
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (retval == -ENODEV)
+ netif_device_detach(netdev);
+ else if (retval)
+ dev_err(netdev->dev.parent,
+ "failed resubmitting read bulk urb: %d\n", retval);
+
+ return;
+}
+
+/*
+ * callback for bulk OUT (write) urb
+ */
+static void ems_usb_write_bulk_callback(struct urb *urb)
+{
+ struct ems_tx_urb_context *context = urb->context;
+ struct ems_usb *dev;
+ struct net_device *netdev;
+
+ BUG_ON(!context);
+
+ dev = context->dev;
+ netdev = dev->netdev;
+
+ /* free up our allocated buffer */
+ usb_buffer_free(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (urb->status)
+ dev_info(netdev->dev.parent, "Tx URB aborted (%d)\n",
+ urb->status);
+
+ netdev->trans_start = jiffies;
+
+ /* transmission complete interrupt */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += context->dlc;
+
+ can_get_echo_skb(netdev, context->echo_index);
+
+ /* Release context */
+ context->echo_index = MAX_TX_URBS;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+/*
+ * Send the given CPC command synchronously
+ */
+static int ems_usb_command_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
+{
+ int actual_length;
+
+ /* Copy payload */
+ memcpy(&dev->tx_msg_buffer[CPC_HEADER_SIZE], msg,
+ msg->length + CPC_MSG_HEADER_LEN);
+
+ /* Clear header */
+ memset(&dev->tx_msg_buffer[0], 0, CPC_HEADER_SIZE);
+
+ return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+ &dev->tx_msg_buffer[0],
+ msg->length + CPC_MSG_HEADER_LEN + CPC_HEADER_SIZE,
+ &actual_length, 1000);
+}
+
+/*
+ * Change the CAN controller's mode register
+ */
+static int ems_usb_write_mode(struct ems_usb *dev, u8 mode)
+{
+ dev->active_params.msg.can_params.cc_params.sja1000.mode = mode;
+
+ return ems_usb_command_msg(dev, &dev->active_params);
+}
+
+/*
+ * Send a CPC_Control command to change how the interface reports CAN
+ * messages, bus errors and CAN state change notifications.
+ */
+static int ems_usb_control_cmd(struct ems_usb *dev, u8 val)
+{
+ struct ems_cpc_msg cmd;
+
+ cmd.type = CPC_CMD_TYPE_CONTROL;
+ cmd.length = CPC_MSG_HEADER_LEN + 1;
+
+ cmd.msgid = 0;
+
+ cmd.msg.generic[0] = val;
+
+ return ems_usb_command_msg(dev, &cmd);
+}
+
+/*
+ * Start interface
+ */
+static int ems_usb_start(struct ems_usb *dev)
+{
+ struct net_device *netdev = dev->netdev;
+ int err, i;
+
+ dev->intr_in_buffer[0] = 0;
+ dev->free_slots = 15; /* initial size */
+
+ for (i = 0; i < MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf = NULL;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ dev_err(netdev->dev.parent,
+ "No memory left for URBs\n");
+ return -ENOMEM;
+ }
+
+ buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ dev_err(netdev->dev.parent,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
+ buf, RX_BUFFER_SIZE,
+ ems_usb_read_bulk_callback, dev);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->rx_submitted);
+
+ err = usb_submit_urb(urb, GFP_KERNEL);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ usb_unanchor_urb(urb);
+ usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf,
+ urb->transfer_dma);
+ break;
+ }
+
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+
+ /* Did we submit any URBs? */
+ if (i == 0) {
+ dev_warn(netdev->dev.parent, "couldn't setup read URBs\n");
+ return err;
+ }
+
+ /* Warn if we couldn't submit all the URBs */
+ if (i < MAX_RX_URBS)
+ dev_warn(netdev->dev.parent, "rx performance may be slow\n");
+
+ /* Setup and start interrupt URB */
+ usb_fill_int_urb(dev->intr_urb, dev->udev,
+ usb_rcvintpipe(dev->udev, 1),
+ dev->intr_in_buffer,
+ INTR_IN_BUFFER_SIZE,
+ ems_usb_read_interrupt_callback, dev, 1);
+
+ err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
+ err);
+
+ return err;
+ }
+
+ /* CPC-USB will transfer received CAN messages to the host */
+ err = ems_usb_control_cmd(dev, CONTR_CAN_MESSAGE | CONTR_CONT_ON);
+ if (err)
+ goto failed;
+
+ /* CPC-USB will transfer CAN state changes to host */
+ err = ems_usb_control_cmd(dev, CONTR_CAN_STATE | CONTR_CONT_ON);
+ if (err)
+ goto failed;
+
+ /* CPC-USB will transfer bus errors to host */
+ err = ems_usb_control_cmd(dev, CONTR_BUS_ERROR | CONTR_CONT_ON);
+ if (err)
+ goto failed;
+
+ err = ems_usb_write_mode(dev, SJA1000_MOD_NORMAL);
+ if (err)
+ goto failed;
+
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return 0;
+
+failed:
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
+
+ return err;
+}
+
+static void unlink_all_urbs(struct ems_usb *dev)
+{
+ int i;
+
+ usb_unlink_urb(dev->intr_urb);
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ dev->tx_contexts[i].echo_index = MAX_TX_URBS;
+}
+
+static int ems_usb_open(struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+ int err;
+
+ err = ems_usb_write_mode(dev, SJA1000_MOD_RM);
+ if (err)
+ return err;
+
+ /* common open */
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ /* finally start device */
+ err = ems_usb_start(dev);
+ if (err) {
+ if (err == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ dev_warn(netdev->dev.parent, "couldn't start device: %d\n",
+ err);
+
+ close_candev(netdev);
+
+ return err;
+ }
+
+ dev->open_time = jiffies;
+
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+ struct ems_tx_urb_context *context = NULL;
+ struct net_device_stats *stats = &netdev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct ems_cpc_msg *msg;
+ struct urb *urb;
+ u8 *buf;
+ int i, err;
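+ /* URB transfer size: 4 byte CPC header + 11 byte message header
+ * + the CAN message itself */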
+ size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ + sizeof(struct cpc_can_msg);
+
+ /* create a URB, and a buffer for it, and copy the data to the URB */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ dev_err(netdev->dev.parent, "No memory left for URBs\n");
+ goto nomem;
+ }
+
+ buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
+ if (!buf) {
+ dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ goto nomem;
+ }
+
+ msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
+
+ msg->msg.can_msg.id = cf->can_id & CAN_ERR_MASK;
+ msg->msg.can_msg.length = cf->can_dlc;
+
+ if (cf->can_id & CAN_RTR_FLAG) {
+ msg->type = cf->can_id & CAN_EFF_FLAG ?
+ CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;
+
+ msg->length = CPC_CAN_MSG_MIN_SIZE;
+ } else {
+ msg->type = cf->can_id & CAN_EFF_FLAG ?
+ CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;
+
+ for (i = 0; i < cf->can_dlc; i++)
+ msg->msg.can_msg.msg[i] = cf->data[i];
+
+ msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
+ }
+
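+ /* Find a free TX context; echo_index == MAX_TX_URBS marks an unused slot */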
+ for (i = 0; i < MAX_TX_URBS; i++) {
+ if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
+ context = &dev->tx_contexts[i];
+ break;
+ }
+ }
+
+ /*
+ * This should never happen: it would mean we have more URBs in flight
+ * than allowed (MAX_TX_URBS).
+ */
+ if (!context) {
+ usb_unanchor_urb(urb);
+ usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+
+ dev_warn(netdev->dev.parent, "couldn't find free context\n");
+
+ return NETDEV_TX_BUSY;
+ }
+
+ context->dev = dev;
+ context->echo_index = i;
+ context->dlc = cf->can_dlc;
+
+ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
+ size, ems_usb_write_bulk_callback, context);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, context->echo_index);
+
+ atomic_inc(&dev->active_tx_urbs);
+
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index);
+
+ usb_unanchor_urb(urb);
+ usb_buffer_free(dev->udev, size, buf, urb->transfer_dma);
+ dev_kfree_skb(skb);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (err == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ dev_warn(netdev->dev.parent, "failed tx_urb %d\n", err);
+
+ stats->tx_dropped++;
+ }
+ } else {
+ netdev->trans_start = jiffies;
+
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+ dev->free_slots < 5) {
+ netif_stop_queue(netdev);
+ }
+ }
+
+ /*
+ * Release our reference to this URB, the USB core will eventually free
+ * it entirely.
+ */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+nomem:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ stats->tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static int ems_usb_close(struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+
+ /* Stop polling */
+ unlink_all_urbs(dev);
+
+ netif_stop_queue(netdev);
+
+ /* Set CAN controller to reset mode */
+ if (ems_usb_write_mode(dev, SJA1000_MOD_RM))
+ dev_warn(netdev->dev.parent, "couldn't stop device");
+
+ close_candev(netdev);
+
+ dev->open_time = 0;
+
+ return 0;
+}
+
+static const struct net_device_ops ems_usb_netdev_ops = {
+ .ndo_open = ems_usb_open,
+ .ndo_stop = ems_usb_close,
+ .ndo_start_xmit = ems_usb_start_xmit,
+};
+
+static struct can_bittiming_const ems_usb_bittiming_const = {
+ .name = "ems_usb",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+
+ if (!dev->open_time)
+ return -EINVAL;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL))
+ dev_warn(netdev->dev.parent, "couldn't start device");
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ems_usb_set_bittiming(struct net_device *netdev)
+{
+ struct ems_usb *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+ u8 btr0, btr1;
+
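+ /* SJA1000 bus timing: BTR0 carries BRP (bits 0-5) and SJW (bits 6-7),
+ * BTR1 carries TSEG1 (bits 0-3), TSEG2 (bits 4-6) and the
+ * triple-sampling flag (bit 7) */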
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ dev_info(netdev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
+ btr0, btr1);
+
+ dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0;
+ dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1;
+
+ return ems_usb_command_msg(dev, &dev->active_params);
+}
+
+static void init_params_sja1000(struct ems_cpc_msg *msg)
+{
+ struct cpc_sja1000_params *sja1000 =
+ &msg->msg.can_params.cc_params.sja1000;
+
+ msg->type = CPC_CMD_TYPE_CAN_PARAMS;
+ msg->length = sizeof(struct cpc_can_params);
+ msg->msgid = 0;
+
+ msg->msg.can_params.cc_type = CPC_CC_TYPE_SJA1000;
+
+ /* Acceptance filter open */
+ sja1000->acc_code0 = 0x00;
+ sja1000->acc_code1 = 0x00;
+ sja1000->acc_code2 = 0x00;
+ sja1000->acc_code3 = 0x00;
+
+ /* Acceptance mask fully open */
+ sja1000->acc_mask0 = 0xFF;
+ sja1000->acc_mask1 = 0xFF;
+ sja1000->acc_mask2 = 0xFF;
+ sja1000->acc_mask3 = 0xFF;
+
+ sja1000->btr0 = 0;
+ sja1000->btr1 = 0;
+
+ sja1000->outp_contr = SJA1000_DEFAULT_OUTPUT_CONTROL;
+ sja1000->mode = SJA1000_MOD_RM;
+}
+
+/*
+ * probe function for new CPC-USB devices
+ */
+static int ems_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct net_device *netdev;
+ struct ems_usb *dev;
+ int i, err = -ENOMEM;
+
+ netdev = alloc_candev(sizeof(struct ems_usb));
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ dev = netdev_priv(netdev);
+
+ dev->udev = interface_to_usbdev(intf);
+ dev->netdev = netdev;
+
+ dev->can.state = CAN_STATE_STOPPED;
+ dev->can.clock.freq = EMS_USB_ARM7_CLOCK;
+ dev->can.bittiming_const = &ems_usb_bittiming_const;
+ dev->can.do_set_bittiming = ems_usb_set_bittiming;
+ dev->can.do_set_mode = ems_usb_set_mode;
+
+ netdev->flags |= IFF_ECHO; /* we support local echo */
+
+ netdev->netdev_ops = &ems_usb_netdev_ops;
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ init_usb_anchor(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ for (i = 0; i < MAX_TX_URBS; i++)
+ dev->tx_contexts[i].echo_index = MAX_TX_URBS;
+
+ dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->intr_urb) {
+ dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n");
+ goto cleanup_candev;
+ }
+
+ dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
+ if (!dev->intr_in_buffer) {
+ dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n");
+ goto cleanup_intr_urb;
+ }
+
+ dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
+ sizeof(struct ems_cpc_msg), GFP_KERNEL);
+ if (!dev->tx_msg_buffer) {
+ dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n");
+ goto cleanup_intr_in_buffer;
+ }
+
+ usb_set_intfdata(intf, dev);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ init_params_sja1000(&dev->active_params);
+
+ err = ems_usb_command_msg(dev, &dev->active_params);
+ if (err) {
+ dev_err(netdev->dev.parent,
+ "couldn't initialize controller: %d\n", err);
+ goto cleanup_tx_msg_buffer;
+ }
+
+ err = register_candev(netdev);
+ if (err) {
+ dev_err(netdev->dev.parent,
+ "couldn't register CAN device: %d\n", err);
+ goto cleanup_tx_msg_buffer;
+ }
+
+ return 0;
+
+cleanup_tx_msg_buffer:
+ kfree(dev->tx_msg_buffer);
+
+cleanup_intr_in_buffer:
+ kfree(dev->intr_in_buffer);
+
+cleanup_intr_urb:
+ usb_free_urb(dev->intr_urb);
+
+cleanup_candev:
+ free_candev(netdev);
+
+ return err;
+}
+
+/*
+ * called by the usb core when the device is removed from the system
+ */
+static void ems_usb_disconnect(struct usb_interface *intf)
+{
+ struct ems_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (dev) {
+ unregister_netdev(dev->netdev);
+ free_candev(dev->netdev);
+
+ unlink_all_urbs(dev);
+
+ usb_free_urb(dev->intr_urb);
+
+ kfree(dev->intr_in_buffer);
+ }
+}
+
+/* usb specific object needed to register this driver with the usb subsystem */
+static struct usb_driver ems_usb_driver = {
+ .name = "ems_usb",
+ .probe = ems_usb_probe,
+ .disconnect = ems_usb_disconnect,
+ .id_table = ems_usb_table,
+};
+
+static int __init ems_usb_init(void)
+{
+ int err;
+
+ printk(KERN_INFO "CPC-USB kernel driver loaded\n");
+
+ /* register this driver with the USB subsystem */
+ err = usb_register(&ems_usb_driver);
+
+ if (err) {
+ err("usb_register failed. Error number %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit ems_usb_exit(void)
+{
+ /* deregister this driver with the USB subsystem */
+ usb_deregister(&ems_usb_driver);
+}
+
+module_init(ems_usb_init);
+module_exit(ems_usb_exit);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index d45eacb7670..46c87ec7960 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -85,8 +85,6 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
cp->uio_dev = iminor(inode);
- cnic_shutdown_bnx2_rx_ring(dev);
-
cnic_init_bnx2_tx_ring(dev);
cnic_init_bnx2_rx_ring(dev);
@@ -98,6 +96,8 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
struct cnic_dev *dev = uinfo->priv;
struct cnic_local *cp = dev->cnic_priv;
+ cnic_shutdown_bnx2_rx_ring(dev);
+
cp->uio_dev = -1;
return 0;
}
@@ -2733,7 +2733,8 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
cnic_ulp_init(dev);
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
- else if (event == NETDEV_UP) {
+
+ if (event == NETDEV_UP) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index a49235739ee..d8b09efdcb5 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.0.0"
-#define CNIC_MODULE_RELDATE "May 21, 2009"
+#define CNIC_MODULE_VERSION "2.0.1"
+#define CNIC_MODULE_RELDATE "Oct 01, 2009"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 3e3fab8afb1..61f9da2b494 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -1109,7 +1109,7 @@ static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
int rc, phy_id;
- char mdio_bus_id[BUS_ID_SIZE];
+ char mdio_bus_id[MII_BUS_ID_SIZE];
struct resource *mem;
struct cpmac_priv *priv;
struct net_device *dev;
@@ -1118,7 +1118,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (external_switch || dumb_switch) {
- strncpy(mdio_bus_id, "0", BUS_ID_SIZE); /* fixed phys bus */
+ strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
} else {
for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
@@ -1126,7 +1126,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
continue;
if (!cpmac_mii->phy_map[phy_id])
continue;
- strncpy(mdio_bus_id, cpmac_mii->id, BUS_ID_SIZE);
+ strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
break;
}
}
@@ -1167,7 +1167,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
- snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 15c0195ebd3..a24be34a3f7 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev)
e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
- /* Renegotiate with link partner */
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
if (autoneg_normal) {
- data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
- data |= BMCR_ANENABLE | BMCR_ANRESTART;
+ /* Renegotiate with link partner */
+ data |= BMCR_ANENABLE | BMCR_ANRESTART;
+ } else {
+ /* Don't negotiate speed or duplex */
+ data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
+
+ /* Set speed and duplex static */
+ if (current_speed_selection == 10)
+ data &= ~BMCR_SPEED100;
+ else
+ data |= BMCR_SPEED100;
+
+ if (current_duplex != full)
+ data &= ~BMCR_FULLDPLX;
+ else
+ data |= BMCR_FULLDPLX;
}
e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
}
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d465eaa796c..f72c56dec33 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -200,6 +200,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/** NOTE:: For DM646x the IN_VECTOR has changed */
#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
+
/* CPPI bit positions */
#define EMAC_CPPI_SOP_BIT BIT(31)
@@ -330,6 +333,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
+
/** net_buf_obj: EMAC network bufferdata structure
*
* EMAC network buffer data structure
@@ -2167,7 +2173,11 @@ static int emac_poll(struct napi_struct *napi, int budget)
emac_int_enable(priv);
}
- if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) {
+ mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+ if (unlikely(status & mask)) {
u32 ch, cause;
dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
netif_stop_queue(ndev);
@@ -2541,40 +2551,49 @@ static int emac_dev_stop(struct net_device *ndev)
static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
+ u32 mac_control;
+ u32 stats_clear_mask;
/* update emac hardware stats and reset the registers*/
+ mac_control = emac_read(EMAC_MACCONTROL);
+
+ if (mac_control & EMAC_MACCONTROL_GMIIEN)
+ stats_clear_mask = EMAC_STATS_CLR_MASK;
+ else
+ stats_clear_mask = 0;
+
priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
- emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
emac_read(EMAC_TXSINGLECOLL) +
emac_read(EMAC_TXMULTICOLL));
- emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE);
- emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE);
- emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+ emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+ emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
emac_read(EMAC_RXJABBER) +
emac_read(EMAC_RXUNDERSIZED));
- emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE);
- emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE);
- emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+ emac_write(EMAC_RXJABBER, stats_clear_mask);
+ emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
emac_read(EMAC_RXMOFOVERRUNS));
- emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
- emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+ emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
- emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
priv->net_dev_stats.tx_carrier_errors +=
emac_read(EMAC_TXCARRIERSENSE);
- emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
- emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE);
+ emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
return &priv->net_dev_stats;
}
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 9686c1fa28f..7a3bdac84ab 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -237,6 +237,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 679965c2bb8..5d2f48f0225 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -151,6 +151,7 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 1a4f89c66a2..42e2b7e21c2 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -149,7 +149,6 @@ do { \
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_ICH8_APME 0x0004
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
@@ -293,7 +292,6 @@ struct e1000_adapter {
u64 hw_csum_err;
u64 hw_csum_good;
- u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
u32 rx_int_delay;
u32 rx_abs_int_delay;
@@ -317,7 +315,6 @@ struct e1000_adapter {
struct e1000_rx_ring test_rx_ring;
int msg_enable;
- bool have_msi;
/* to not mess up cache alignment, always add to the bottom */
bool tso_force;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 27f996a2010..490b2b7cd3a 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -82,7 +82,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
- { "rx_header_split", E1000_STAT(rx_hdr_split) },
{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
{ "tx_smbus", E1000_STAT(stats.mgptc) },
{ "rx_smbus", E1000_STAT(stats.mgprc) },
@@ -114,8 +113,6 @@ static int e1000_get_settings(struct net_device *netdev,
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
- if (hw->phy_type == e1000_phy_ife)
- ecmd->supported &= ~SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -178,14 +175,6 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /* When SoL/IDER sessions are active, autoneg/speed/duplex
- * cannot be changed */
- if (e1000_check_phy_reset_block(hw)) {
- DPRINTK(DRV, ERR, "Cannot change link characteristics "
- "when SoL/IDER is active.\n");
- return -EINVAL;
- }
-
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -330,10 +319,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
else
netdev->features &= ~NETIF_F_TSO;
- if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
- netdev->features |= NETIF_F_TSO6;
- else
- netdev->features &= ~NETIF_F_TSO6;
+ netdev->features &= ~NETIF_F_TSO6;
DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = true;
@@ -441,7 +427,6 @@ static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
if (hw->mac_type >= e1000_82540 &&
- hw->mac_type < e1000_82571 &&
hw->media_type == e1000_media_type_copper) {
regs_buff[26] = er32(MANC);
}
@@ -554,10 +539,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000_write_eeprom(hw, first_word,
last_word - first_word + 1, eeprom_buff);
- /* Update the checksum over the first part of the EEPROM if needed
- * and flush shadow RAM for 82573 conrollers */
- if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
- (hw->mac_type == e1000_82573)))
+ /* Update the checksum over the first part of the EEPROM if needed */
+ if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
e1000_update_eeprom_checksum(hw);
kfree(eeprom_buff);
@@ -568,31 +551,12 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
char firmware_version[32];
- u16 eeprom_data;
strncpy(drvinfo->driver, e1000_driver_name, 32);
strncpy(drvinfo->version, e1000_driver_version, 32);
- /* EEPROM image version # is reported as firmware version # for
- * 8257{1|2|3} controllers */
- e1000_read_eeprom(hw, 5, 1, &eeprom_data);
- switch (hw->mac_type) {
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- sprintf(firmware_version, "%d.%d-%d",
- (eeprom_data & 0xF000) >> 12,
- (eeprom_data & 0x0FF0) >> 4,
- eeprom_data & 0x000F);
- break;
- default:
- sprintf(firmware_version, "N/A");
- }
-
+ sprintf(firmware_version, "N/A");
strncpy(drvinfo->fw_version, firmware_version, 32);
strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->regdump_len = e1000_get_regs_len(netdev);
@@ -781,21 +745,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
- switch (hw->mac_type) {
+
/* there are several bits on newer hardware that are r/w */
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- toggle = 0x7FFFF3FF;
- break;
- case e1000_82573:
- case e1000_ich8lan:
- toggle = 0x7FFFF033;
- break;
- default:
- toggle = 0xFFFFF833;
- break;
- }
+ toggle = 0xFFFFF833;
before = er32(STATUS);
value = (er32(STATUS) & toggle);
@@ -810,12 +762,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* restore previous status */
ew32(STATUS, before);
- if (hw->mac_type != e1000_ich8lan) {
- REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
- REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
- }
+ REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
+ REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -830,8 +780,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
- before = (hw->mac_type == e1000_ich8lan ?
- 0x06C3B33E : 0x06DFB3FE);
+ before = 0x06DFB3FE;
REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
@@ -839,12 +788,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
- if (hw->mac_type != e1000_ich8lan)
- REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
+ REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
- value = (hw->mac_type == e1000_ich8lan ?
- E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ value = E1000_RAR_ENTRIES;
for (i = 0; i < value; i++) {
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
@@ -859,8 +806,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
- value = (hw->mac_type == e1000_ich8lan ?
- E1000_MC_TBL_SIZE_ICH8LAN : E1000_MC_TBL_SIZE);
+ value = E1000_MC_TBL_SIZE;
for (i = 0; i < value; i++)
REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
@@ -933,9 +879,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 10; i++) {
- if (hw->mac_type == e1000_ich8lan && i == 8)
- continue;
-
/* Interrupt to test */
mask = 1 << i;
@@ -1289,35 +1232,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
/* autoneg off */
e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
- } else if (hw->phy_type == e1000_phy_gg82563)
- e1000_write_phy_reg(hw,
- GG82563_PHY_KMRN_MODE_CTRL,
- 0x1CC);
+ }
ctrl_reg = er32(CTRL);
- if (hw->phy_type == e1000_phy_ife) {
- /* force 100, set loopback */
- e1000_write_phy_reg(hw, PHY_CTRL, 0x6100);
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_100 |/* Force Speed to 100 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
- } else {
- /* force 1000, set loopback */
- e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);
-
- /* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = er32(CTRL);
- ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
- ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
- E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
- E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
- }
+ /* Now set up the MAC to the same speed/duplex as the PHY. */
+ ctrl_reg = er32(CTRL);
+ ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+ ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+ E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+ E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88)
@@ -1373,14 +1301,8 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
case e1000_82541_rev_2:
case e1000_82547:
case e1000_82547_rev_2:
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_80003es2lan:
- case e1000_ich8lan:
return e1000_integrated_phy_loopback(adapter);
break;
-
default:
/* Default PHY loopback work is to read the MII
* control register and assert bit 14 (loopback mode).
@@ -1409,14 +1331,6 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
case e1000_82546_rev_3:
return e1000_set_phy_loopback(adapter);
break;
- case e1000_82571:
- case e1000_82572:
-#define E1000_SERDES_LB_ON 0x410
- e1000_set_phy_loopback(adapter);
- ew32(SCTL, E1000_SERDES_LB_ON);
- msleep(10);
- return 0;
- break;
default:
rctl = er32(RCTL);
rctl |= E1000_RCTL_LBM_TCVR;
@@ -1440,26 +1354,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
switch (hw->mac_type) {
- case e1000_82571:
- case e1000_82572:
- if (hw->media_type == e1000_media_type_fiber ||
- hw->media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
- ew32(SCTL, E1000_SERDES_LB_OFF);
- msleep(10);
- break;
- }
- /* Fall Through */
case e1000_82545:
case e1000_82546:
case e1000_82545_rev_3:
case e1000_82546_rev_3:
default:
hw->autoneg = true;
- if (hw->phy_type == e1000_phy_gg82563)
- e1000_write_phy_reg(hw,
- GG82563_PHY_KMRN_MODE_CTRL,
- 0x180);
e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
@@ -1560,17 +1460,6 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
- struct e1000_hw *hw = &adapter->hw;
-
- /* PHY loopback cannot be performed if SoL/IDER
- * sessions are active */
- if (e1000_check_phy_reset_block(hw)) {
- DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
- "when SoL/IDER is active.\n");
- *data = 0;
- goto out;
- }
-
*data = e1000_setup_desc_rings(adapter);
if (*data)
goto out;
@@ -1592,13 +1481,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->media_type == e1000_media_type_internal_serdes) {
int i = 0;
- hw->serdes_link_down = true;
+ hw->serdes_has_link = false;
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
do {
e1000_check_for_link(hw);
- if (!hw->serdes_link_down)
+ if (hw->serdes_has_link)
return *data;
msleep(20);
} while (i++ < 3750);
@@ -1716,15 +1605,11 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
case E1000_DEV_ID_82545EM_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER:
case E1000_DEV_ID_82546GB_PCIE:
- case E1000_DEV_ID_82571EB_SERDES_QUAD:
/* these don't support WoL at all */
wol->supported = 0;
break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
- case E1000_DEV_ID_82571EB_FIBER:
- case E1000_DEV_ID_82571EB_SERDES:
- case E1000_DEV_ID_82571EB_COPPER:
/* Wake events not supported on port B */
if (er32(STATUS) & E1000_STATUS_FUNC_1) {
wol->supported = 0;
@@ -1733,10 +1618,6 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
/* return success for non excluded adapter ports */
retval = 0;
break;
- case E1000_DEV_ID_82571EB_QUAD_COPPER:
- case E1000_DEV_ID_82571EB_QUAD_FIBER:
- case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
- case E1000_DEV_ID_82571PT_QUAD_COPPER:
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* quad port adapters only support WoL on port A */
if (!adapter->quad_port_a) {
@@ -1872,30 +1753,15 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
if (!data)
data = INT_MAX;
- if (hw->mac_type < e1000_82571) {
- if (!adapter->blink_timer.function) {
- init_timer(&adapter->blink_timer);
- adapter->blink_timer.function = e1000_led_blink_callback;
- adapter->blink_timer.data = (unsigned long)adapter;
- }
- e1000_setup_led(hw);
- mod_timer(&adapter->blink_timer, jiffies);
- msleep_interruptible(data * 1000);
- del_timer_sync(&adapter->blink_timer);
- } else if (hw->phy_type == e1000_phy_ife) {
- if (!adapter->blink_timer.function) {
- init_timer(&adapter->blink_timer);
- adapter->blink_timer.function = e1000_led_blink_callback;
- adapter->blink_timer.data = (unsigned long)adapter;
- }
- mod_timer(&adapter->blink_timer, jiffies);
- msleep_interruptible(data * 1000);
- del_timer_sync(&adapter->blink_timer);
- e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
- } else {
- e1000_blink_led_start(hw);
- msleep_interruptible(data * 1000);
+ if (!adapter->blink_timer.function) {
+ init_timer(&adapter->blink_timer);
+ adapter->blink_timer.function = e1000_led_blink_callback;
+ adapter->blink_timer.data = (unsigned long)adapter;
}
+ e1000_setup_led(hw);
+ mod_timer(&adapter->blink_timer, jiffies);
+ msleep_interruptible(data * 1000);
+ del_timer_sync(&adapter->blink_timer);
e1000_led_off(hw);
clear_bit(E1000_LED_ON, &adapter->led_status);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 45ac225a7aa..8d7d87f1282 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -24,88 +24,34 @@
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*******************************************************************************/
+ */
/* e1000_hw.c
* Shared functions for accessing and configuring the MAC
*/
-
#include "e1000_hw.h"
-static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
-static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
-static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
-static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
-static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
-static void e1000_release_software_semaphore(struct e1000_hw *hw);
-
-static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
static s32 e1000_check_downshift(struct e1000_hw *hw);
static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity *polarity);
static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
static void e1000_clear_vfta(struct e1000_hw *hw);
-static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
bool link_up);
static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
-static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 *max_length);
-static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static s32 e1000_get_software_flag(struct e1000_hw *hw);
-static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
-static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
static s32 e1000_id_led_init(struct e1000_hw *hw);
-static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
- u32 cnf_base_addr,
- u32 cnf_size);
-static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
static void e1000_init_rx_addrs(struct e1000_hw *hw);
-static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
-static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
-static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
-static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
- u16 offset, u8 *sum);
-static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw,
- struct e1000_host_mng_command_header
- *hdr);
-static s32 e1000_mng_write_commit(struct e1000_hw *hw);
-static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
- struct e1000_phy_info *phy_info);
static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
struct e1000_phy_info *phy_info);
-static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
struct e1000_phy_info *phy_info);
-static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
-static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
-static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index,
- u8 byte);
-static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
-static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
-static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
- u16 *data);
-static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
- u16 data);
-static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static void e1000_release_software_flag(struct e1000_hw *hw);
static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
-static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
static s32 e1000_set_phy_type(struct e1000_hw *hw);
@@ -117,12 +63,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
-static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
- u16 count);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count);
static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
+ u16 words, u16 *data);
static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
@@ -131,7 +76,7 @@ static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count);
static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u16 phy_data);
-static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
+static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u16 *phy_data);
static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
@@ -140,188 +85,164 @@ static void e1000_standby_eeprom(struct e1000_hw *hw);
static s32 e1000_set_vco_speed(struct e1000_hw *hw);
static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
static s32 e1000_set_phy_mode(struct e1000_hw *hw);
-static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
-static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
-static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
-static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
-static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
/* IGP cable length table */
static const
-u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
- { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
- 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
- 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
- 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
- 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
- 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
- 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
-
-static const
-u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
- { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
- 104, 109, 114, 118, 121, 124};
+u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+ 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+ 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+ 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+ 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+ 100,
+ 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+ 110, 110,
+ 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120,
+ 120, 120
+};
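+
+/*
+ * Illustrative sketch, not part of the driver: the table above maps IGP
+ * PHY AGC readings to approximate cable length in meters. The real
+ * lookup lives in e1000_get_cable_length(), which combines several AGC
+ * channels and tracks min/max lengths; this hypothetical helper only
+ * shows the clamp-and-index idea, assuming the reading has already been
+ * shifted down to a table index.
+ */
+static u16 igp_cable_length_from_agc(u16 agc_reading)
+{
+	u16 index = agc_reading;	/* assumed: already a table index */
+
+	if (index >= IGP01E1000_AGC_LENGTH_TABLE_SIZE)
+		index = IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1;
+
+	return e1000_igp_cable_length_table[index];	/* meters */
+}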
static DEFINE_SPINLOCK(e1000_eeprom_lock);
-/******************************************************************************
- * Set the phy type member in the hw struct.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_set_phy_type - Set the phy type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_set_phy_type");
-
- if (hw->mac_type == e1000_undefined)
- return -E1000_ERR_PHY_TYPE;
-
- switch (hw->phy_id) {
- case M88E1000_E_PHY_ID:
- case M88E1000_I_PHY_ID:
- case M88E1011_I_PHY_ID:
- case M88E1111_I_PHY_ID:
- hw->phy_type = e1000_phy_m88;
- break;
- case IGP01E1000_I_PHY_ID:
- if (hw->mac_type == e1000_82541 ||
- hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82547_rev_2) {
- hw->phy_type = e1000_phy_igp;
- break;
- }
- case IGP03E1000_E_PHY_ID:
- hw->phy_type = e1000_phy_igp_3;
- break;
- case IFE_E_PHY_ID:
- case IFE_PLUS_E_PHY_ID:
- case IFE_C_E_PHY_ID:
- hw->phy_type = e1000_phy_ife;
- break;
- case GG82563_E_PHY_ID:
- if (hw->mac_type == e1000_80003es2lan) {
- hw->phy_type = e1000_phy_gg82563;
- break;
- }
- /* Fall Through */
- default:
- /* Should never have loaded on this device */
- hw->phy_type = e1000_phy_undefined;
- return -E1000_ERR_PHY_TYPE;
- }
-
- return E1000_SUCCESS;
-}
-
-/******************************************************************************
- * IGP phy init script - initializes the GbE PHY
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void e1000_phy_init_script(struct e1000_hw *hw)
-{
- u32 ret_val;
- u16 phy_saved_data;
-
- DEBUGFUNC("e1000_phy_init_script");
-
- if (hw->phy_init_script) {
- msleep(20);
-
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of this routine. */
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
-
- /* Disabled the PHY transmitter */
- e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+ DEBUGFUNC("e1000_set_phy_type");
- msleep(20);
+ if (hw->mac_type == e1000_undefined)
+ return -E1000_ERR_PHY_TYPE;
- e1000_write_phy_reg(hw,0x0000,0x0140);
-
- msleep(5);
-
- switch (hw->mac_type) {
- case e1000_82541:
- case e1000_82547:
- e1000_write_phy_reg(hw, 0x1F95, 0x0001);
-
- e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
-
- e1000_write_phy_reg(hw, 0x1F79, 0x0018);
-
- e1000_write_phy_reg(hw, 0x1F30, 0x1600);
-
- e1000_write_phy_reg(hw, 0x1F31, 0x0014);
-
- e1000_write_phy_reg(hw, 0x1F32, 0x161C);
-
- e1000_write_phy_reg(hw, 0x1F94, 0x0003);
-
- e1000_write_phy_reg(hw, 0x1F96, 0x003F);
-
- e1000_write_phy_reg(hw, 0x2010, 0x0008);
- break;
-
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- e1000_write_phy_reg(hw, 0x1F73, 0x0099);
- break;
- default:
- break;
- }
-
- e1000_write_phy_reg(hw, 0x0000, 0x3300);
-
- msleep(20);
-
- /* Now enable the transmitter */
- e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
-
- if (hw->mac_type == e1000_82547) {
- u16 fused, fine, coarse;
-
- /* Move to analog registers page */
- e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
-
- if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
- e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+ switch (hw->phy_id) {
+ case M88E1000_E_PHY_ID:
+ case M88E1000_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ hw->phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID:
+ if (hw->mac_type == e1000_82541 ||
+ hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ hw->phy_type = e1000_phy_igp;
+ break;
+ }
+ default:
+ /* Should never have loaded on this device */
+ hw->phy_type = e1000_phy_undefined;
+ return -E1000_ERR_PHY_TYPE;
+ }
- fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
- coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+ return E1000_SUCCESS;
+}
- if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
- coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
- fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
- } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
- fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+/**
+ * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY
+ * @hw: Struct containing variables accessed by shared code
+ */
+static void e1000_phy_init_script(struct e1000_hw *hw)
+{
+ u32 ret_val;
+ u16 phy_saved_data;
+
+ DEBUGFUNC("e1000_phy_init_script");
+
+ if (hw->phy_init_script) {
+ msleep(20);
+
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+		/* Disable the PHY transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+ msleep(20);
+
+ e1000_write_phy_reg(hw, 0x0000, 0x0140);
+ msleep(5);
+
+ switch (hw->mac_type) {
+ case e1000_82541:
+ case e1000_82547:
+ e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+ e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+ e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+ e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+ e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+ e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+ e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+ e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+ e1000_write_phy_reg(hw, 0x2010, 0x0008);
+ break;
- fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
- (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
- (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+ break;
+ default:
+ break;
+ }
- e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
- e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
- IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
- }
- }
- }
+ e1000_write_phy_reg(hw, 0x0000, 0x3300);
+ msleep(20);
+
+ /* Now enable the transmitter */
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (hw->mac_type == e1000_82547) {
+ u16 fused, fine, coarse;
+
+ /* Move to analog registers page */
+ e1000_read_phy_reg(hw,
+ IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+ &fused);
+
+ if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+ e1000_read_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_STATUS,
+ &fused);
+
+ fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+ coarse =
+ fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+ if (coarse >
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+ coarse -=
+ IGP01E1000_ANALOG_FUSE_COARSE_10;
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+ } else if (coarse ==
+ IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+ fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+ fused =
+ (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+ (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+ (coarse &
+ IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+ e1000_write_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_CONTROL,
+ fused);
+ e1000_write_phy_reg(hw,
+ IGP01E1000_ANALOG_FUSE_BYPASS,
+ IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+ }
+ }
+ }
}
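
/*
 * Illustrative sketch, not part of the driver: the 82547 branch above
 * re-derives the analog fuse trim when the spare fuse is not blown.
 * The helper name is hypothetical; the masks and constants are the
 * driver's own, and the arithmetic mirrors the code above.
 */
static u16 igp_adjust_fuse_trim(u16 fused)
{
	u16 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
	u16 coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;

	if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
		coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
		fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
	} else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
		fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
	}

	return (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
	       (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
	       (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
}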
-/******************************************************************************
- * Set the mac type member in the hw struct.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_set_mac_type - Set the mac type member in the hw struct.
+ * @hw: Struct containing variables accessed by shared code
+ */
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_set_mac_type");
@@ -397,61 +318,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82547GI:
hw->mac_type = e1000_82547_rev_2;
break;
- case E1000_DEV_ID_82571EB_COPPER:
- case E1000_DEV_ID_82571EB_FIBER:
- case E1000_DEV_ID_82571EB_SERDES:
- case E1000_DEV_ID_82571EB_SERDES_DUAL:
- case E1000_DEV_ID_82571EB_SERDES_QUAD:
- case E1000_DEV_ID_82571EB_QUAD_COPPER:
- case E1000_DEV_ID_82571PT_QUAD_COPPER:
- case E1000_DEV_ID_82571EB_QUAD_FIBER:
- case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
- hw->mac_type = e1000_82571;
- break;
- case E1000_DEV_ID_82572EI_COPPER:
- case E1000_DEV_ID_82572EI_FIBER:
- case E1000_DEV_ID_82572EI_SERDES:
- case E1000_DEV_ID_82572EI:
- hw->mac_type = e1000_82572;
- break;
- case E1000_DEV_ID_82573E:
- case E1000_DEV_ID_82573E_IAMT:
- case E1000_DEV_ID_82573L:
- hw->mac_type = e1000_82573;
- break;
- case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
- case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
- case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
- case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
- hw->mac_type = e1000_80003es2lan;
- break;
- case E1000_DEV_ID_ICH8_IGP_M_AMT:
- case E1000_DEV_ID_ICH8_IGP_AMT:
- case E1000_DEV_ID_ICH8_IGP_C:
- case E1000_DEV_ID_ICH8_IFE:
- case E1000_DEV_ID_ICH8_IFE_GT:
- case E1000_DEV_ID_ICH8_IFE_G:
- case E1000_DEV_ID_ICH8_IGP_M:
- hw->mac_type = e1000_ich8lan;
- break;
default:
/* Should never have loaded on this device */
return -E1000_ERR_MAC_TYPE;
}
switch (hw->mac_type) {
- case e1000_ich8lan:
- hw->swfwhw_semaphore_present = true;
- hw->asf_firmware_present = true;
- break;
- case e1000_80003es2lan:
- hw->swfw_sync_present = true;
- /* fall through */
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- hw->eeprom_semaphore_present = true;
- /* fall through */
case e1000_82541:
case e1000_82547:
case e1000_82541_rev_2:
@@ -468,6058 +340,4500 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
if (hw->mac_type == e1000_82543)
hw->bad_tx_carr_stats_fd = true;
- /* capable of receiving management packets to the host */
- if (hw->mac_type >= e1000_82571)
- hw->has_manc2h = true;
-
- /* In rare occasions, ESB2 systems would end up started without
- * the RX unit being turned on.
- */
- if (hw->mac_type == e1000_80003es2lan)
- hw->rx_needs_kicking = true;
-
if (hw->mac_type > e1000_82544)
hw->has_smbus = true;
return E1000_SUCCESS;
}
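
/*
 * Illustrative sketch, not part of the driver: the usual call order for
 * these shared-code entry points. The wrapper is hypothetical, but the
 * ordering matches what the kernel-doc comments in this file assume
 * (classify the device, pick the media type, reset, then init).
 */
static s32 example_bring_up(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000_set_mac_type(hw);	/* classify the PCI device ID */
	if (ret_val)
		return ret_val;

	e1000_set_media_type(hw);		/* copper vs. fiber/serdes */

	ret_val = e1000_reset_hw(hw);		/* global MAC reset */
	if (ret_val)
		return ret_val;

	return e1000_init_hw(hw);		/* post-reset configuration */
}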
-/*****************************************************************************
- * Set media type and TBI compatibility.
- *
- * hw - Struct containing variables accessed by shared code
- * **************************************************************************/
+/**
+ * e1000_set_media_type - Set media type and TBI compatibility.
+ * @hw: Struct containing variables accessed by shared code
+ */
void e1000_set_media_type(struct e1000_hw *hw)
{
- u32 status;
-
- DEBUGFUNC("e1000_set_media_type");
-
- if (hw->mac_type != e1000_82543) {
- /* tbi_compatibility is only valid on 82543 */
- hw->tbi_compatibility_en = false;
- }
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82545GM_SERDES:
- case E1000_DEV_ID_82546GB_SERDES:
- case E1000_DEV_ID_82571EB_SERDES:
- case E1000_DEV_ID_82571EB_SERDES_DUAL:
- case E1000_DEV_ID_82571EB_SERDES_QUAD:
- case E1000_DEV_ID_82572EI_SERDES:
- case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
- hw->media_type = e1000_media_type_internal_serdes;
- break;
- default:
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- hw->media_type = e1000_media_type_fiber;
- break;
- case e1000_ich8lan:
- case e1000_82573:
- /* The STATUS_TBIMODE bit is reserved or reused for the this
- * device.
- */
- hw->media_type = e1000_media_type_copper;
- break;
- default:
- status = er32(STATUS);
- if (status & E1000_STATUS_TBIMODE) {
- hw->media_type = e1000_media_type_fiber;
- /* tbi_compatibility not valid on fiber */
- hw->tbi_compatibility_en = false;
- } else {
- hw->media_type = e1000_media_type_copper;
- }
- break;
- }
- }
+ u32 status;
+
+ DEBUGFUNC("e1000_set_media_type");
+
+ if (hw->mac_type != e1000_82543) {
+ /* tbi_compatibility is only valid on 82543 */
+ hw->tbi_compatibility_en = false;
+ }
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82545GM_SERDES:
+ case E1000_DEV_ID_82546GB_SERDES:
+ hw->media_type = e1000_media_type_internal_serdes;
+ break;
+ default:
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->media_type = e1000_media_type_fiber;
+ break;
+ default:
+ status = er32(STATUS);
+ if (status & E1000_STATUS_TBIMODE) {
+ hw->media_type = e1000_media_type_fiber;
+ /* tbi_compatibility not valid on fiber */
+ hw->tbi_compatibility_en = false;
+ } else {
+ hw->media_type = e1000_media_type_copper;
+ }
+ break;
+ }
+ }
}
-/******************************************************************************
- * Reset the transmit and receive units; mask and clear all interrupts.
+/**
+ * e1000_reset_hw - reset the hardware completely
+ * @hw: Struct containing variables accessed by shared code
*
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ */
s32 e1000_reset_hw(struct e1000_hw *hw)
{
- u32 ctrl;
- u32 ctrl_ext;
- u32 icr;
- u32 manc;
- u32 led_ctrl;
- u32 timeout;
- u32 extcnf_ctrl;
- s32 ret_val;
-
- DEBUGFUNC("e1000_reset_hw");
-
- /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
- if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
- e1000_pci_clear_mwi(hw);
- }
-
- if (hw->bus_type == e1000_bus_type_pci_express) {
- /* Prevent the PCI-E bus from sticking if there is no TLP connection
- * on the last TLP read/write transaction when MAC is reset.
- */
- if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
- DEBUGOUT("PCI-E Master disable polling has failed.\n");
- }
- }
-
- /* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
- ew32(IMC, 0xffffffff);
-
- /* Disable the Transmit and Receive units. Then delay to allow
- * any pending transactions to complete before we hit the MAC with
- * the global reset.
- */
- ew32(RCTL, 0);
- ew32(TCTL, E1000_TCTL_PSP);
- E1000_WRITE_FLUSH();
-
- /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
- hw->tbi_compatibility_on = false;
-
- /* Delay to allow any outstanding PCI transactions to complete before
- * resetting the device
- */
- msleep(10);
-
- ctrl = er32(CTRL);
-
- /* Must reset the PHY before resetting the MAC */
- if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
- ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
- msleep(5);
- }
-
- /* Must acquire the MDIO ownership before MAC reset.
- * Ownership defaults to firmware after a reset. */
- if (hw->mac_type == e1000_82573) {
- timeout = 10;
-
- extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- do {
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- extcnf_ctrl = er32(EXTCNF_CTRL);
-
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
- break;
- else
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
- msleep(2);
- timeout--;
- } while (timeout);
- }
-
- /* Workaround for ICH8 bit corruption issue in FIFO memory */
- if (hw->mac_type == e1000_ich8lan) {
- /* Set Tx and Rx buffer allocation to 8k apiece. */
- ew32(PBA, E1000_PBA_8K);
- /* Set Packet Buffer Size to 16k. */
- ew32(PBS, E1000_PBS_16K);
- }
-
- /* Issue a global reset to the MAC. This will reset the chip's
- * transmit, receive, DMA, and link units. It will not effect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- DEBUGOUT("Issuing a global reset to MAC\n");
-
- switch (hw->mac_type) {
- case e1000_82544:
- case e1000_82540:
- case e1000_82545:
- case e1000_82546:
- case e1000_82541:
- case e1000_82541_rev_2:
- /* These controllers can't ack the 64-bit write when issuing the
- * reset, so use IO-mapping as a workaround to issue the reset */
- E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
- break;
- case e1000_82545_rev_3:
- case e1000_82546_rev_3:
- /* Reset is performed on a shadow of the control register */
- ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
- break;
- case e1000_ich8lan:
- if (!hw->phy_reset_disable &&
- e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
- /* e1000_ich8lan PHY HW reset requires MAC CORE reset
- * at the same time to make sure the interface between
- * MAC and the external PHY is reset.
- */
- ctrl |= E1000_CTRL_PHY_RST;
- }
-
- e1000_get_software_flag(hw);
- ew32(CTRL, (ctrl | E1000_CTRL_RST));
- msleep(5);
- break;
- default:
- ew32(CTRL, (ctrl | E1000_CTRL_RST));
- break;
- }
-
- /* After MAC reset, force reload of EEPROM to restore power-on settings to
- * device. Later controllers reload the EEPROM automatically, so just wait
- * for reload to complete.
- */
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- case e1000_82543:
- case e1000_82544:
- /* Wait for reset to complete */
- udelay(10);
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- ew32(CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH();
- /* Wait for EEPROM reload */
- msleep(2);
- break;
- case e1000_82541:
- case e1000_82541_rev_2:
- case e1000_82547:
- case e1000_82547_rev_2:
- /* Wait for EEPROM reload */
- msleep(20);
- break;
- case e1000_82573:
- if (!e1000_is_onboard_nvm_eeprom(hw)) {
- udelay(10);
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- ew32(CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH();
- }
- /* fall through */
- default:
- /* Auto read done will delay 5ms or poll based on mac type */
- ret_val = e1000_get_auto_rd_done(hw);
- if (ret_val)
- return ret_val;
- break;
- }
-
- /* Disable HW ARPs on ASF enabled adapters */
- if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
- manc = er32(MANC);
- manc &= ~(E1000_MANC_ARP_EN);
- ew32(MANC, manc);
- }
-
- if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
- e1000_phy_init_script(hw);
-
- /* Configure activity LED after PHY reset */
- led_ctrl = er32(LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- ew32(LEDCTL, led_ctrl);
- }
-
- /* Clear interrupt mask to stop board from generating interrupts */
- DEBUGOUT("Masking off all interrupts\n");
- ew32(IMC, 0xffffffff);
-
- /* Clear any pending interrupt events. */
- icr = er32(ICR);
-
- /* If MWI was previously enabled, reenable it. */
- if (hw->mac_type == e1000_82542_rev2_0) {
- if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
- e1000_pci_set_mwi(hw);
- }
-
- if (hw->mac_type == e1000_ich8lan) {
- u32 kab = er32(KABGTXD);
- kab |= E1000_KABGTXD_BGSQLBIAS;
- ew32(KABGTXD, kab);
- }
-
- return E1000_SUCCESS;
-}
+ u32 ctrl;
+ u32 ctrl_ext;
+ u32 icr;
+ u32 manc;
+ u32 led_ctrl;
+ s32 ret_val;
-/******************************************************************************
- *
- * Initialize a number of hardware-dependent bits
- *
- * hw: Struct containing variables accessed by shared code
- *
- * This function contains hardware limitation workarounds for PCI-E adapters
- *
- *****************************************************************************/
-static void e1000_initialize_hardware_bits(struct e1000_hw *hw)
-{
- if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
- /* Settings common to all PCI-express silicon */
- u32 reg_ctrl, reg_ctrl_ext;
- u32 reg_tarc0, reg_tarc1;
- u32 reg_tctl;
- u32 reg_txdctl, reg_txdctl1;
-
- /* link autonegotiation/sync workarounds */
- reg_tarc0 = er32(TARC0);
- reg_tarc0 &= ~((1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
-
- /* Enable not-done TX descriptor counting */
- reg_txdctl = er32(TXDCTL);
- reg_txdctl |= E1000_TXDCTL_COUNT_DESC;
- ew32(TXDCTL, reg_txdctl);
- reg_txdctl1 = er32(TXDCTL1);
- reg_txdctl1 |= E1000_TXDCTL_COUNT_DESC;
- ew32(TXDCTL1, reg_txdctl1);
-
- switch (hw->mac_type) {
- case e1000_82571:
- case e1000_82572:
- /* Clear PHY TX compatible mode bits */
- reg_tarc1 = er32(TARC1);
- reg_tarc1 &= ~((1 << 30)|(1 << 29));
-
- /* link autonegotiation/sync workarounds */
- reg_tarc0 |= ((1 << 26)|(1 << 25)|(1 << 24)|(1 << 23));
-
- /* TX ring control fixes */
- reg_tarc1 |= ((1 << 26)|(1 << 25)|(1 << 24));
-
- /* Multiple read bit is reversed polarity */
- reg_tctl = er32(TCTL);
- if (reg_tctl & E1000_TCTL_MULR)
- reg_tarc1 &= ~(1 << 28);
- else
- reg_tarc1 |= (1 << 28);
-
- ew32(TARC1, reg_tarc1);
- break;
- case e1000_82573:
- reg_ctrl_ext = er32(CTRL_EXT);
- reg_ctrl_ext &= ~(1 << 23);
- reg_ctrl_ext |= (1 << 22);
-
- /* TX byte count fix */
- reg_ctrl = er32(CTRL);
- reg_ctrl &= ~(1 << 29);
-
- ew32(CTRL_EXT, reg_ctrl_ext);
- ew32(CTRL, reg_ctrl);
- break;
- case e1000_80003es2lan:
- /* improve small packet performace for fiber/serdes */
- if ((hw->media_type == e1000_media_type_fiber) ||
- (hw->media_type == e1000_media_type_internal_serdes)) {
- reg_tarc0 &= ~(1 << 20);
- }
-
- /* Multiple read bit is reversed polarity */
- reg_tctl = er32(TCTL);
- reg_tarc1 = er32(TARC1);
- if (reg_tctl & E1000_TCTL_MULR)
- reg_tarc1 &= ~(1 << 28);
- else
- reg_tarc1 |= (1 << 28);
-
- ew32(TARC1, reg_tarc1);
- break;
- case e1000_ich8lan:
- /* Reduce concurrent DMA requests to 3 from 4 */
- if ((hw->revision_id < 3) ||
- ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
- (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))
- reg_tarc0 |= ((1 << 29)|(1 << 28));
-
- reg_ctrl_ext = er32(CTRL_EXT);
- reg_ctrl_ext |= (1 << 22);
- ew32(CTRL_EXT, reg_ctrl_ext);
-
- /* workaround TX hang with TSO=on */
- reg_tarc0 |= ((1 << 27)|(1 << 26)|(1 << 24)|(1 << 23));
-
- /* Multiple read bit is reversed polarity */
- reg_tctl = er32(TCTL);
- reg_tarc1 = er32(TARC1);
- if (reg_tctl & E1000_TCTL_MULR)
- reg_tarc1 &= ~(1 << 28);
- else
- reg_tarc1 |= (1 << 28);
-
- /* workaround TX hang with TSO=on */
- reg_tarc1 |= ((1 << 30)|(1 << 26)|(1 << 24));
-
- ew32(TARC1, reg_tarc1);
- break;
- default:
- break;
- }
-
- ew32(TARC0, reg_tarc0);
- }
+ DEBUGFUNC("e1000_reset_hw");
+
+ /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Disable the Transmit and Receive units. Then delay to allow
+ * any pending transactions to complete before we hit the MAC with
+ * the global reset.
+ */
+ ew32(RCTL, 0);
+ ew32(TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH();
+
+ /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+ hw->tbi_compatibility_on = false;
+
+ /* Delay to allow any outstanding PCI transactions to complete before
+ * resetting the device
+ */
+ msleep(10);
+
+ ctrl = er32(CTRL);
+
+ /* Must reset the PHY before resetting the MAC */
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST));
+ msleep(5);
+ }
+
+ /* Issue a global reset to the MAC. This will reset the chip's
+	 * transmit, receive, DMA, and link units. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ DEBUGOUT("Issuing a global reset to MAC\n");
+
+ switch (hw->mac_type) {
+ case e1000_82544:
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82546:
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ /* These controllers can't ack the 64-bit write when issuing the
+ * reset, so use IO-mapping as a workaround to issue the reset */
+ E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ /* Reset is performed on a shadow of the control register */
+ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
+ break;
+ default:
+ ew32(CTRL, (ctrl | E1000_CTRL_RST));
+ break;
+ }
+
+ /* After MAC reset, force reload of EEPROM to restore power-on settings to
+ * device. Later controllers reload the EEPROM automatically, so just wait
+ * for reload to complete.
+ */
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* Wait for reset to complete */
+ udelay(10);
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ /* Wait for EEPROM reload */
+ msleep(2);
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ /* Wait for EEPROM reload */
+ msleep(20);
+ break;
+ default:
+ /* Auto read done will delay 5ms or poll based on mac type */
+ ret_val = e1000_get_auto_rd_done(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ /* Disable HW ARPs on ASF enabled adapters */
+ if (hw->mac_type >= e1000_82540) {
+ manc = er32(MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ ew32(MANC, manc);
+ }
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ e1000_phy_init_script(hw);
+
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Clear interrupt mask to stop board from generating interrupts */
+ DEBUGOUT("Masking off all interrupts\n");
+ ew32(IMC, 0xffffffff);
+
+ /* Clear any pending interrupt events. */
+ icr = er32(ICR);
+
+ /* If MWI was previously enabled, reenable it. */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Performs basic configuration of the adapter.
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_init_hw - Performs basic configuration of the adapter.
+ * @hw: Struct containing variables accessed by shared code
*
* Assumes that the controller has previously been reset and is in a
* post-reset uninitialized state. Initializes the receive address registers,
* multicast table, and VLAN filter table. Calls routines to setup link
* configuration and flow control settings. Clears all on-chip counters. Leaves
* the transmit and receive units disabled and uninitialized.
- *****************************************************************************/
+ */
s32 e1000_init_hw(struct e1000_hw *hw)
{
- u32 ctrl;
- u32 i;
- s32 ret_val;
- u32 mta_size;
- u32 reg_data;
- u32 ctrl_ext;
-
- DEBUGFUNC("e1000_init_hw");
-
- /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
- if ((hw->mac_type == e1000_ich8lan) &&
- ((hw->revision_id < 3) ||
- ((hw->device_id != E1000_DEV_ID_ICH8_IGP_M_AMT) &&
- (hw->device_id != E1000_DEV_ID_ICH8_IGP_M)))) {
- reg_data = er32(STATUS);
- reg_data &= ~0x80000000;
- ew32(STATUS, reg_data);
- }
-
- /* Initialize Identification LED */
- ret_val = e1000_id_led_init(hw);
- if (ret_val) {
- DEBUGOUT("Error Initializing Identification LED\n");
- return ret_val;
- }
-
- /* Set the media type and TBI compatibility */
- e1000_set_media_type(hw);
-
- /* Must be called after e1000_set_media_type because media_type is used */
- e1000_initialize_hardware_bits(hw);
-
- /* Disabling VLAN filtering. */
- DEBUGOUT("Initializing the IEEE VLAN\n");
- /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
- if (hw->mac_type != e1000_ich8lan) {
- if (hw->mac_type < e1000_82545_rev_3)
- ew32(VET, 0);
- e1000_clear_vfta(hw);
- }
-
- /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
- if (hw->mac_type == e1000_82542_rev2_0) {
- DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
- e1000_pci_clear_mwi(hw);
- ew32(RCTL, E1000_RCTL_RST);
- E1000_WRITE_FLUSH();
- msleep(5);
- }
-
- /* Setup the receive address. This involves initializing all of the Receive
- * Address Registers (RARs 0 - 15).
- */
- e1000_init_rx_addrs(hw);
-
- /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
- if (hw->mac_type == e1000_82542_rev2_0) {
- ew32(RCTL, 0);
- E1000_WRITE_FLUSH();
- msleep(1);
- if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
- e1000_pci_set_mwi(hw);
- }
-
- /* Zero out the Multicast HASH table */
- DEBUGOUT("Zeroing the MTA\n");
- mta_size = E1000_MC_TBL_SIZE;
- if (hw->mac_type == e1000_ich8lan)
- mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
- for (i = 0; i < mta_size; i++) {
- E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
- /* use write flush to prevent Memory Write Block (MWB) from
- * occuring when accessing our register space */
- E1000_WRITE_FLUSH();
- }
-
- /* Set the PCI priority bit correctly in the CTRL register. This
- * determines if the adapter gives priority to receives, or if it
- * gives equal priority to transmits and receives. Valid only on
- * 82542 and 82543 silicon.
- */
- if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
- ctrl = er32(CTRL);
- ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
- }
-
- switch (hw->mac_type) {
- case e1000_82545_rev_3:
- case e1000_82546_rev_3:
- break;
- default:
- /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
- if (hw->bus_type == e1000_bus_type_pcix && e1000_pcix_get_mmrbc(hw) > 2048)
- e1000_pcix_set_mmrbc(hw, 2048);
- break;
- }
-
- /* More time needed for PHY to initialize */
- if (hw->mac_type == e1000_ich8lan)
- msleep(15);
-
- /* Call a subroutine to configure the link and setup flow control. */
- ret_val = e1000_setup_link(hw);
-
- /* Set the transmit descriptor write-back policy */
- if (hw->mac_type > e1000_82544) {
- ctrl = er32(TXDCTL);
- ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
- ew32(TXDCTL, ctrl);
- }
-
- if (hw->mac_type == e1000_82573) {
- e1000_enable_tx_pkt_filtering(hw);
- }
-
- switch (hw->mac_type) {
- default:
- break;
- case e1000_80003es2lan:
- /* Enable retransmit on late collisions */
- reg_data = er32(TCTL);
- reg_data |= E1000_TCTL_RTLC;
- ew32(TCTL, reg_data);
-
- /* Configure Gigabit Carry Extend Padding */
- reg_data = er32(TCTL_EXT);
- reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
- reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
- ew32(TCTL_EXT, reg_data);
-
- /* Configure Transmit Inter-Packet Gap */
- reg_data = er32(TIPG);
- reg_data &= ~E1000_TIPG_IPGT_MASK;
- reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
- ew32(TIPG, reg_data);
-
- reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
- reg_data &= ~0x00100000;
- E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
- /* Fall through */
- case e1000_82571:
- case e1000_82572:
- case e1000_ich8lan:
- ctrl = er32(TXDCTL1);
- ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
- ew32(TXDCTL1, ctrl);
- break;
- }
-
-
- if (hw->mac_type == e1000_82573) {
- u32 gcr = er32(GCR);
- gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
- ew32(GCR, gcr);
- }
-
- /* Clear all of the statistics registers (clear on read). It is
- * important that we do this after we have tried to establish link
- * because the symbol error count will increment wildly if there
- * is no link.
- */
- e1000_clear_hw_cntrs(hw);
-
- /* ICH8 No-snoop bits are opposite polarity.
- * Set to snoop by default after reset. */
- if (hw->mac_type == e1000_ich8lan)
- e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
-
- if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
- hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
- ctrl_ext = er32(CTRL_EXT);
- /* Relaxed ordering must be disabled to avoid a parity
- * error crash in a PCI slot. */
- ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
- ew32(CTRL_EXT, ctrl_ext);
- }
-
- return ret_val;
+ u32 ctrl;
+ u32 i;
+ s32 ret_val;
+ u32 mta_size;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_hw");
+
+ /* Initialize Identification LED */
+ ret_val = e1000_id_led_init(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Initializing Identification LED\n");
+ return ret_val;
+ }
+
+ /* Set the media type and TBI compatibility */
+ e1000_set_media_type(hw);
+
+ /* Disabling VLAN filtering. */
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ if (hw->mac_type < e1000_82545_rev_3)
+ ew32(VET, 0);
+ e1000_clear_vfta(hw);
+
+ /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+ e1000_pci_clear_mwi(hw);
+ ew32(RCTL, E1000_RCTL_RST);
+ E1000_WRITE_FLUSH();
+ msleep(5);
+ }
+
+ /* Setup the receive address. This involves initializing all of the Receive
+ * Address Registers (RARs 0 - 15).
+ */
+ e1000_init_rx_addrs(hw);
+
+ /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+ if (hw->mac_type == e1000_82542_rev2_0) {
+ ew32(RCTL, 0);
+ E1000_WRITE_FLUSH();
+ msleep(1);
+ if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+ e1000_pci_set_mwi(hw);
+ }
+
+ /* Zero out the Multicast HASH table */
+ DEBUGOUT("Zeroing the MTA\n");
+ mta_size = E1000_MC_TBL_SIZE;
+ for (i = 0; i < mta_size; i++) {
+ E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+ /* use write flush to prevent Memory Write Block (MWB) from
+ * occurring when accessing our register space */
+ E1000_WRITE_FLUSH();
+ }
+
+ /* Set the PCI priority bit correctly in the CTRL register. This
+ * determines if the adapter gives priority to receives, or if it
+ * gives equal priority to transmits and receives. Valid only on
+ * 82542 and 82543 silicon.
+ */
+ if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PRIOR);
+ }
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ if (hw->bus_type == e1000_bus_type_pcix
+ && e1000_pcix_get_mmrbc(hw) > 2048)
+ e1000_pcix_set_mmrbc(hw, 2048);
+ break;
+ }
+
+ /* Call a subroutine to configure the link and setup flow control. */
+ ret_val = e1000_setup_link(hw);
+
+ /* Set the transmit descriptor write-back policy */
+ if (hw->mac_type > e1000_82544) {
+ ctrl = er32(TXDCTL);
+ ctrl =
+ (ctrl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB;
+ ew32(TXDCTL, ctrl);
+ }
+
+ /* Clear all of the statistics registers (clear on read). It is
+ * important that we do this after we have tried to establish link
+ * because the symbol error count will increment wildly if there
+ * is no link.
+ */
+ e1000_clear_hw_cntrs(hw);
+
+ if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+ hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+ ctrl_ext = er32(CTRL_EXT);
+ /* Relaxed ordering must be disabled to avoid a parity
+ * error crash in a PCI slot. */
+ ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ return ret_val;
}
-/******************************************************************************
- * Adjust SERDES output amplitude based on EEPROM setting.
- *
- * hw - Struct containing variables accessed by shared code.
- *****************************************************************************/
+/**
+ * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting.
+ * @hw: Struct containing variables accessed by shared code.
+ */
static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
{
- u16 eeprom_data;
- s32 ret_val;
-
- DEBUGFUNC("e1000_adjust_serdes_amplitude");
-
- if (hw->media_type != e1000_media_type_internal_serdes)
- return E1000_SUCCESS;
-
- switch (hw->mac_type) {
- case e1000_82545_rev_3:
- case e1000_82546_rev_3:
- break;
- default:
- return E1000_SUCCESS;
- }
-
- ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
- if (ret_val) {
- return ret_val;
- }
-
- if (eeprom_data != EEPROM_RESERVED_WORD) {
- /* Adjust SERDES output amplitude only. */
- eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
- if (ret_val)
- return ret_val;
- }
-
- return E1000_SUCCESS;
+ u16 eeprom_data;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+ if (hw->media_type != e1000_media_type_internal_serdes)
+ return E1000_SUCCESS;
+
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+
+ ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
+ &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
+
+ if (eeprom_data != EEPROM_RESERVED_WORD) {
+ /* Adjust SERDES output amplitude only. */
+ eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Configures flow control and link settings.
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_setup_link - Configures flow control and link settings.
+ * @hw: Struct containing variables accessed by shared code
*
- * Determines which flow control settings to use. Calls the apropriate media-
+ * Determines which flow control settings to use. Calls the appropriate media-
* specific link configuration function. Configures the flow control settings.
* Assuming the adapter has a valid link partner, a valid link should be
* established. Assumes the hardware has previously been reset and the
* transmitter and receiver are not enabled.
- *****************************************************************************/
+ */
s32 e1000_setup_link(struct e1000_hw *hw)
{
- u32 ctrl_ext;
- s32 ret_val;
- u16 eeprom_data;
-
- DEBUGFUNC("e1000_setup_link");
-
- /* In the case of the phy reset being blocked, we already have a link.
- * We do not have to set it up again. */
- if (e1000_check_phy_reset_block(hw))
- return E1000_SUCCESS;
-
- /* Read and store word 0x0F of the EEPROM. This word contains bits
- * that determine the hardware's default PAUSE (flow control) mode,
- * a bit that determines whether the HW defaults to enabling or
- * disabling auto-negotiation, and the direction of the
- * SW defined pins. If there is no SW over-ride of the flow
- * control setting, then the variable hw->fc will
- * be initialized based on a value in the EEPROM.
- */
- if (hw->fc == E1000_FC_DEFAULT) {
- switch (hw->mac_type) {
- case e1000_ich8lan:
- case e1000_82573:
- hw->fc = E1000_FC_FULL;
- break;
- default:
- ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
- 1, &eeprom_data);
- if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
- hw->fc = E1000_FC_NONE;
- else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
- EEPROM_WORD0F_ASM_DIR)
- hw->fc = E1000_FC_TX_PAUSE;
- else
- hw->fc = E1000_FC_FULL;
- break;
- }
- }
-
- /* We want to save off the original Flow Control configuration just
- * in case we get disconnected and then reconnected into a different
- * hub or switch with different Flow Control capabilities.
- */
- if (hw->mac_type == e1000_82542_rev2_0)
- hw->fc &= (~E1000_FC_TX_PAUSE);
-
- if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
- hw->fc &= (~E1000_FC_RX_PAUSE);
-
- hw->original_fc = hw->fc;
-
- DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
-
- /* Take the 4 bits from EEPROM word 0x0F that determine the initial
- * polarity value for the SW controlled pins, and setup the
- * Extended Device Control reg with that info.
- * This is needed because one of the SW controlled pins is used for
- * signal detection. So this should be done before e1000_setup_pcs_link()
- * or e1000_phy_setup() is called.
- */
- if (hw->mac_type == e1000_82543) {
- ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
- 1, &eeprom_data);
- if (ret_val) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
- SWDPIO__EXT_SHIFT);
- ew32(CTRL_EXT, ctrl_ext);
- }
-
- /* Call the necessary subroutine to configure the link. */
- ret_val = (hw->media_type == e1000_media_type_copper) ?
- e1000_setup_copper_link(hw) :
- e1000_setup_fiber_serdes_link(hw);
-
- /* Initialize the flow control address, type, and PAUSE timer
- * registers to their default values. This is done even if flow
- * control is disabled, because it does not hurt anything to
- * initialize these registers.
- */
- DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
-
- /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
- if (hw->mac_type != e1000_ich8lan) {
- ew32(FCT, FLOW_CONTROL_TYPE);
- ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
- ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
- }
-
- ew32(FCTTV, hw->fc_pause_time);
-
- /* Set the flow control receive threshold registers. Normally,
- * these registers will be set to a default threshold that may be
- * adjusted later by the driver's runtime code. However, if the
- * ability to transmit pause frames in not enabled, then these
- * registers will be set to 0.
- */
- if (!(hw->fc & E1000_FC_TX_PAUSE)) {
- ew32(FCRTL, 0);
- ew32(FCRTH, 0);
- } else {
- /* We need to set up the Receive Threshold high and low water marks
- * as well as (optionally) enabling the transmission of XON frames.
- */
- if (hw->fc_send_xon) {
- ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
- ew32(FCRTH, hw->fc_high_water);
- } else {
- ew32(FCRTL, hw->fc_low_water);
- ew32(FCRTH, hw->fc_high_water);
- }
- }
- return ret_val;
+ u32 ctrl_ext;
+ s32 ret_val;
+ u16 eeprom_data;
+
+ DEBUGFUNC("e1000_setup_link");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->fc == E1000_FC_DEFAULT) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+ hw->fc = E1000_FC_NONE;
+ else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+ EEPROM_WORD0F_ASM_DIR)
+ hw->fc = E1000_FC_TX_PAUSE;
+ else
+ hw->fc = E1000_FC_FULL;
+ }
+
+ /* We want to save off the original Flow Control configuration just
+ * in case we get disconnected and then reconnected into a different
+ * hub or switch with different Flow Control capabilities.
+ */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ hw->fc &= (~E1000_FC_TX_PAUSE);
+
+ if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+ hw->fc &= (~E1000_FC_RX_PAUSE);
+
+ hw->original_fc = hw->fc;
+
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+ /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+ * polarity value for the SW controlled pins, and setup the
+ * Extended Device Control reg with that info.
+ * This is needed because one of the SW controlled pins is used for
+ * signal detection. So this should be done before e1000_setup_pcs_link()
+ * or e1000_phy_setup() is called.
+ */
+ if (hw->mac_type == e1000_82543) {
+ ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+ 1, &eeprom_data);
+ if (ret_val) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+ SWDPIO__EXT_SHIFT);
+ ew32(CTRL_EXT, ctrl_ext);
+ }
+
+ /* Call the necessary subroutine to configure the link. */
+ ret_val = (hw->media_type == e1000_media_type_copper) ?
+ e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw);
+
+ /* Initialize the flow control address, type, and PAUSE timer
+ * registers to their default values. This is done even if flow
+ * control is disabled, because it does not hurt anything to
+ * initialize these registers.
+ */
+ DEBUGOUT
+ ("Initializing the Flow Control address, type and timer regs\n");
+
+ ew32(FCT, FLOW_CONTROL_TYPE);
+ ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+ ew32(FCTTV, hw->fc_pause_time);
+
+ /* Set the flow control receive threshold registers. Normally,
+ * these registers will be set to a default threshold that may be
+ * adjusted later by the driver's runtime code. However, if the
+	 * ability to transmit pause frames is not enabled, then these
+ * registers will be set to 0.
+ */
+ if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+ ew32(FCRTL, 0);
+ ew32(FCRTH, 0);
+ } else {
+ /* We need to set up the Receive Threshold high and low water marks
+ * as well as (optionally) enabling the transmission of XON frames.
+ */
+ if (hw->fc_send_xon) {
+ ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+ ew32(FCRTH, hw->fc_high_water);
+ } else {
+ ew32(FCRTL, hw->fc_low_water);
+ ew32(FCRTH, hw->fc_high_water);
+ }
+ }
+ return ret_val;
}
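
/*
 * Illustrative sketch, not part of the driver: the EEPROM word 0x0F
 * PAUSE-bit decode described in the comment above, factored into a
 * hypothetical helper. The constants are the driver's own.
 */
static u32 fc_mode_from_eeprom(u16 init_control2)
{
	u16 pause_bits = init_control2 & EEPROM_WORD0F_PAUSE_MASK;

	if (pause_bits == 0)
		return E1000_FC_NONE;		/* no PAUSE advertised */
	if (pause_bits == EEPROM_WORD0F_ASM_DIR)
		return E1000_FC_TX_PAUSE;	/* asymmetric, TX only */
	return E1000_FC_FULL;			/* symmetric PAUSE */
}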
-/******************************************************************************
- * Sets up link for a fiber based or serdes based adapter
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_setup_fiber_serdes_link - prepare fiber or serdes link
+ * @hw: Struct containing variables accessed by shared code
*
* Manipulates Physical Coding Sublayer functions in order to configure
* link. Assumes the hardware has been previously reset and the transmitter
* and receiver are not enabled.
- *****************************************************************************/
+ */
static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
{
- u32 ctrl;
- u32 status;
- u32 txcw = 0;
- u32 i;
- u32 signal = 0;
- s32 ret_val;
-
- DEBUGFUNC("e1000_setup_fiber_serdes_link");
-
- /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
- * until explicitly turned off or a power cycle is performed. A read to
- * the register does not indicate its status. Therefore, we ensure
- * loopback mode is disabled during initialization.
- */
- if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
- ew32(SCTL, E1000_DISABLE_SERDES_LOOPBACK);
-
- /* On adapters with a MAC newer than 82544, SWDP 1 will be
- * set when the optics detect a signal. On older adapters, it will be
- * cleared when there is a signal. This applies to fiber media only.
- * If we're on serdes media, adjust the output amplitude to value
- * set in the EEPROM.
- */
- ctrl = er32(CTRL);
- if (hw->media_type == e1000_media_type_fiber)
- signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
-
- ret_val = e1000_adjust_serdes_amplitude(hw);
- if (ret_val)
- return ret_val;
-
- /* Take the link out of reset */
- ctrl &= ~(E1000_CTRL_LRST);
-
- /* Adjust VCO speed to improve BER performance */
- ret_val = e1000_set_vco_speed(hw);
- if (ret_val)
- return ret_val;
-
- e1000_config_collision_dist(hw);
-
- /* Check for a software override of the flow control settings, and setup
- * the device accordingly. If auto-negotiation is enabled, then software
- * will have to set the "PAUSE" bits to the correct value in the Tranmsit
- * Config Word Register (TXCW) and re-start auto-negotiation. However, if
- * auto-negotiation is disabled, then software will have to manually
- * configure the two flow control enable bits in the CTRL register.
- *
- * The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames, but
- * not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do
- * not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
- */
- switch (hw->fc) {
- case E1000_FC_NONE:
- /* Flow control is completely disabled by a software over-ride. */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
- break;
- case E1000_FC_RX_PAUSE:
- /* RX Flow control is enabled and TX Flow control is disabled by a
- * software over-ride. Since there really isn't a way to advertise
- * that we are capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
- */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
- break;
- case E1000_FC_TX_PAUSE:
- /* TX Flow control is enabled, and RX Flow control is disabled, by a
- * software over-ride.
- */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
- break;
- case E1000_FC_FULL:
- /* Flow control (both RX and TX) is enabled by a software over-ride. */
- txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- return -E1000_ERR_CONFIG;
- break;
- }
-
- /* Since auto-negotiation is enabled, take the link out of reset (the link
- * will be in reset, because we previously reset the chip). This will
- * restart auto-negotiation. If auto-neogtiation is successful then the
- * link-up status bit will be set and the flow control enable bits (RFCE
- * and TFCE) will be set according to their negotiated value.
- */
- DEBUGOUT("Auto-negotiation enabled\n");
-
- ew32(TXCW, txcw);
- ew32(CTRL, ctrl);
- E1000_WRITE_FLUSH();
-
- hw->txcw = txcw;
- msleep(1);
-
- /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
- * indication in the Device Status Register. Time-out if a link isn't
- * seen in 500 milliseconds seconds (Auto-negotiation should complete in
- * less than 500 milliseconds even if the other end is doing it in SW).
- * For internal serdes, we just assume a signal is present, then poll.
- */
- if (hw->media_type == e1000_media_type_internal_serdes ||
- (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
- DEBUGOUT("Looking for Link\n");
- for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
- msleep(10);
- status = er32(STATUS);
- if (status & E1000_STATUS_LU) break;
- }
- if (i == (LINK_UP_TIMEOUT / 10)) {
- DEBUGOUT("Never got a valid link from auto-neg!!!\n");
- hw->autoneg_failed = 1;
- /* AutoNeg failed to achieve a link, so we'll call
- * e1000_check_for_link. This routine will force the link up if
- * we detect a signal. This will allow us to communicate with
- * non-autonegotiating link partners.
- */
- ret_val = e1000_check_for_link(hw);
- if (ret_val) {
- DEBUGOUT("Error while checking for link\n");
- return ret_val;
- }
- hw->autoneg_failed = 0;
- } else {
- hw->autoneg_failed = 0;
- DEBUGOUT("Valid Link Found\n");
- }
- } else {
- DEBUGOUT("No Signal Detected\n");
- }
- return E1000_SUCCESS;
+ u32 ctrl;
+ u32 status;
+ u32 txcw = 0;
+ u32 i;
+ u32 signal = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+ /* On adapters with a MAC newer than 82544, SWDP 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ * If we're on serdes media, adjust the output amplitude to value
+ * set in the EEPROM.
+ */
+ ctrl = er32(CTRL);
+ if (hw->media_type == e1000_media_type_fiber)
+ signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+ ret_val = e1000_adjust_serdes_amplitude(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Take the link out of reset */
+ ctrl &= ~(E1000_CTRL_LRST);
+
+ /* Adjust VCO speed to improve BER performance */
+ ret_val = e1000_set_vco_speed(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000_config_collision_dist(hw);
+
+ /* Check for a software override of the flow control settings, and setup
+ * the device accordingly. If auto-negotiation is enabled, then software
+	 * will have to set the "PAUSE" bits to the correct value in the Transmit
+ * Config Word Register (TXCW) and re-start auto-negotiation. However, if
+ * auto-negotiation is disabled, then software will have to manually
+ * configure the two flow control enable bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ /* Flow control is completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case E1000_FC_RX_PAUSE:
+ /* RX Flow control is enabled and TX Flow control is disabled by a
+ * software over-ride. Since there really isn't a way to advertise
+ * that we are capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case E1000_FC_TX_PAUSE:
+ /* TX Flow control is enabled, and RX Flow control is disabled, by a
+ * software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case E1000_FC_FULL:
+ /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the link
+ * will be in reset, because we previously reset the chip). This will
+ * restart auto-negotiation. If auto-negotiation is successful then the
+ * link-up status bit will be set and the flow control enable bits (RFCE
+ * and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ ew32(TXCW, txcw);
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ hw->txcw = txcw;
+ msleep(1);
+
+ /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+ * indication in the Device Status Register. Time-out if a link isn't
+ * seen in 500 milliseconds (Auto-negotiation should complete in
+ * less than 500 milliseconds even if the other end is doing it in SW).
+ * For internal serdes, we just assume a signal is present, then poll.
+ */
+ if (hw->media_type == e1000_media_type_internal_serdes ||
+ (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+ DEBUGOUT("Looking for Link\n");
+ for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+ msleep(10);
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == (LINK_UP_TIMEOUT / 10)) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ hw->autoneg_failed = 1;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * e1000_check_for_link. This routine will force the link up if
+ * we detect a signal. This will allow us to communicate with
+ * non-autonegotiating link partners.
+ */
+ ret_val = e1000_check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ hw->autoneg_failed = 0;
+ } else {
+ hw->autoneg_failed = 0;
+ DEBUGOUT("Valid Link Found\n");
+ }
+ } else {
+ DEBUGOUT("No Signal Detected\n");
+ }
+ return E1000_SUCCESS;
}
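
The flow-control switch above reduces to a small mapping from the hw->fc mode onto the TXCW advertisement bits. A minimal standalone sketch of that mapping, assuming the E1000_TXCW_* and E1000_FC_* definitions from e1000_hw.h are in scope; the helper name fc_to_txcw is hypothetical and not part of the driver:

        /* Hypothetical helper: maps the hw->fc mode to the TXCW advertisement
         * bits set above. RX-pause-only cannot be advertised directly, so
         * E1000_FC_RX_PAUSE advertises the same bits as E1000_FC_FULL and the
         * MAC's ability to send PAUSE frames is disabled later.
         */
        static u32 fc_to_txcw(u32 fc)
        {
                switch (fc) {
                case E1000_FC_NONE:
                        return E1000_TXCW_ANE | E1000_TXCW_FD;
                case E1000_FC_TX_PAUSE:
                        return E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR;
                case E1000_FC_RX_PAUSE:
                case E1000_FC_FULL:
                        return E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK;
                default:
                        return 0; /* the driver treats this as -E1000_ERR_CONFIG */
                }
        }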
-/******************************************************************************
-* Make sure we have a valid PHY and change PHY mode before link setup.
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_copper_link_preconfig - early configuration for copper
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Make sure we have a valid PHY and change PHY mode before link setup.
+ */
static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
{
- u32 ctrl;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_preconfig");
-
- ctrl = er32(CTRL);
- /* With 82543, we need to force speed and duplex on the MAC equal to what
- * the PHY speed and duplex configuration is. In addition, we need to
- * perform a hardware reset on the PHY to take it out of reset.
- */
- if (hw->mac_type > e1000_82543) {
- ctrl |= E1000_CTRL_SLU;
- ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- ew32(CTRL, ctrl);
- } else {
- ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
- ew32(CTRL, ctrl);
- ret_val = e1000_phy_hw_reset(hw);
- if (ret_val)
- return ret_val;
- }
-
- /* Make sure we have a valid PHY */
- ret_val = e1000_detect_gig_phy(hw);
- if (ret_val) {
- DEBUGOUT("Error, did not detect valid phy.\n");
- return ret_val;
- }
- DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
-
- /* Set PHY to class A mode (if necessary) */
- ret_val = e1000_set_phy_mode(hw);
- if (ret_val)
- return ret_val;
-
- if ((hw->mac_type == e1000_82545_rev_3) ||
- (hw->mac_type == e1000_82546_rev_3)) {
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- phy_data |= 0x00000008;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- }
-
- if (hw->mac_type <= e1000_82543 ||
- hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
- hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
- hw->phy_reset_disable = false;
-
- return E1000_SUCCESS;
-}
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+ DEBUGFUNC("e1000_copper_link_preconfig");
-/********************************************************************
-* Copper link setup for e1000_phy_igp series.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
-static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
-{
- u32 led_ctrl;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_igp_setup");
-
- if (hw->phy_reset_disable)
- return E1000_SUCCESS;
-
- ret_val = e1000_phy_reset(hw);
- if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
- return ret_val;
- }
-
- /* Wait 15ms for MAC to configure PHY from eeprom settings */
- msleep(15);
- if (hw->mac_type != e1000_ich8lan) {
- /* Configure activity LED after PHY reset */
- led_ctrl = er32(LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- ew32(LEDCTL, led_ctrl);
- }
-
- /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
- if (hw->phy_type == e1000_phy_igp) {
- /* disable lplu d3 during driver init */
- ret_val = e1000_set_d3_lplu_state(hw, false);
- if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D3\n");
- return ret_val;
- }
- }
-
- /* disable lplu d0 during driver init */
- ret_val = e1000_set_d0_lplu_state(hw, false);
- if (ret_val) {
- DEBUGOUT("Error Disabling LPLU D0\n");
- return ret_val;
- }
- /* Configure mdi-mdix settings */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
- hw->dsp_config_state = e1000_dsp_config_disabled;
- /* Force MDI for earlier revs of the IGP PHY */
- phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
- hw->mdix = 1;
-
- } else {
- hw->dsp_config_state = e1000_dsp_config_enabled;
- phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-
- switch (hw->mdix) {
- case 1:
- phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
- break;
- case 2:
- phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
- break;
- case 0:
- default:
- phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
- break;
- }
- }
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* set auto-master slave resolution settings */
- if (hw->autoneg) {
- e1000_ms_type phy_ms_setting = hw->master_slave;
-
- if (hw->ffe_config_state == e1000_ffe_config_active)
- hw->ffe_config_state = e1000_ffe_config_enabled;
-
- if (hw->dsp_config_state == e1000_dsp_config_activated)
- hw->dsp_config_state = e1000_dsp_config_enabled;
-
- /* when autonegotiation advertisment is only 1000Mbps then we
- * should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default. */
- if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
- /* Disable SmartSpeed */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &phy_data);
- if (ret_val)
- return ret_val;
- phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- phy_data);
- if (ret_val)
- return ret_val;
- /* Set auto Master/Slave resolution process */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
- phy_data &= ~CR_1000T_MS_ENABLE;
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
- if (ret_val)
- return ret_val;
- }
-
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- /* load defaults for future use */
- hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
- ((phy_data & CR_1000T_MS_VALUE) ?
- e1000_ms_force_master :
- e1000_ms_force_slave) :
- e1000_ms_auto;
-
- switch (phy_ms_setting) {
- case e1000_ms_force_master:
- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
- break;
- case e1000_ms_force_slave:
- phy_data |= CR_1000T_MS_ENABLE;
- phy_data &= ~(CR_1000T_MS_VALUE);
- break;
- case e1000_ms_auto:
- phy_data &= ~CR_1000T_MS_ENABLE;
- default:
- break;
- }
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
- if (ret_val)
- return ret_val;
- }
-
- return E1000_SUCCESS;
+ ctrl = er32(CTRL);
+ /* With 82543, we need to force speed and duplex on the MAC equal to what
+ * the PHY speed and duplex configuration is. In addition, we need to
+ * perform a hardware reset on the PHY to take it out of reset.
+ */
+ if (hw->mac_type > e1000_82543) {
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+ } else {
+ ctrl |=
+ (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+ ew32(CTRL, ctrl);
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Make sure we have a valid PHY */
+ ret_val = e1000_detect_gig_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error, did not detect valid phy.\n");
+ return ret_val;
+ }
+ DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+ /* Set PHY to class A mode (if necessary) */
+ ret_val = e1000_set_phy_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82545_rev_3) ||
+ (hw->mac_type == e1000_82546_rev_3)) {
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ phy_data |= 0x00000008;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ }
+
+ if (hw->mac_type <= e1000_82543 ||
+ hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+ hw->mac_type == e1000_82541_rev_2
+ || hw->mac_type == e1000_82547_rev_2)
+ hw->phy_reset_disable = false;
+
+ return E1000_SUCCESS;
}
-/********************************************************************
-* Copper link setup for e1000_phy_gg82563 series.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
-static s32 e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+/**
+ * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series.
+ * @hw: Struct containing variables accessed by shared code
+ */
+static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 phy_data;
- u32 reg_data;
-
- DEBUGFUNC("e1000_copper_link_ggp_setup");
-
- if (!hw->phy_reset_disable) {
-
- /* Enable CRS on TX for half-duplex operation. */
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
- /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
- phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
-
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
- phy_data);
- if (ret_val)
- return ret_val;
-
- /* Options:
- * MDI/MDI-X = 0 (default)
- * 0 - Auto for all speeds
- * 1 - MDI mode
- * 2 - MDI-X mode
- * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
- */
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
-
- switch (hw->mdix) {
- case 1:
- phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
- break;
- case 2:
- phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
- break;
- case 0:
- default:
- phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
- break;
- }
-
- /* Options:
- * disable_polarity_correction = 0 (default)
- * Automatic Correction for Reversed Cable Polarity
- * 0 - Disabled
- * 1 - Enabled
- */
- phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
- if (hw->disable_polarity_correction == 1)
- phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
-
- if (ret_val)
- return ret_val;
-
- /* SW Reset the PHY so all changes take effect */
- ret_val = e1000_phy_reset(hw);
- if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
- return ret_val;
- }
- } /* phy_reset_disable */
-
- if (hw->mac_type == e1000_80003es2lan) {
- /* Bypass RX and TX FIFO's */
- ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
- E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
- E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
-
- if (ret_val)
- return ret_val;
-
- reg_data = er32(CTRL_EXT);
- reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
- ew32(CTRL_EXT, reg_data);
-
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Do not init these registers when the HW is in IAMT mode, since the
- * firmware will have already initialized them. We only initialize
- * them if the HW is not in IAMT mode.
- */
- if (!e1000_check_mng_mode(hw)) {
- /* Enable Electrical Idle on the PHY */
- phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
- phy_data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
- phy_data);
-
- if (ret_val)
- return ret_val;
- }
-
- /* Workaround: Disable padding in Kumeran interface in the MAC
- * and in the PHY to avoid CRC errors.
- */
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
- &phy_data);
- if (ret_val)
- return ret_val;
- phy_data |= GG82563_ICR_DIS_PADDING;
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
- phy_data);
- if (ret_val)
- return ret_val;
- }
-
- return E1000_SUCCESS;
+ u32 led_ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_igp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ /* Wait 15ms for MAC to configure PHY from eeprom settings */
+ msleep(15);
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+
+ /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+ if (hw->phy_type == e1000_phy_igp) {
+ /* disable lplu d3 during driver init */
+ ret_val = e1000_set_d3_lplu_state(hw, false);
+ if (ret_val) {
+ DEBUGOUT("Error Disabling LPLU D3\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure mdi-mdix settings */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ hw->dsp_config_state = e1000_dsp_config_disabled;
+ /* Force MDI for earlier revs of the IGP PHY */
+ phy_data &=
+ ~(IGP01E1000_PSCR_AUTO_MDIX |
+ IGP01E1000_PSCR_FORCE_MDI_MDIX);
+ hw->mdix = 1;
+
+ } else {
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 2:
+ phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+ break;
+ }
+ }
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* set auto-master slave resolution settings */
+ if (hw->autoneg) {
+ e1000_ms_type phy_ms_setting = hw->master_slave;
+
+ if (hw->ffe_config_state == e1000_ffe_config_active)
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+
+ if (hw->dsp_config_state == e1000_dsp_config_activated)
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+
+ /* When the autonegotiation advertisement is 1000Mbps only, we
+ * should disable SmartSpeed and enable automatic Master/Slave
+ * resolution as the hardware default. */
+ if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+ /* Disable SmartSpeed */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ /* Set auto Master/Slave resolution process */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ ret_val =
+ e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (phy_ms_setting) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ default:
+ break;
+ }
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
}
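
The master/slave switch above encodes hw->master_slave into the two 1000BASE-T control bits. A compact sketch of that encoding, assuming the CR_1000T_* definitions and the e1000_ms_type enum from e1000_hw.h; the helper name is hypothetical:

        /* Hypothetical helper: apply an e1000_ms_type to a PHY_1000T_CTRL value.
         * CR_1000T_MS_ENABLE selects manual resolution; CR_1000T_MS_VALUE then
         * picks master (set) or slave (clear). e1000_ms_auto clears MS_ENABLE;
         * any other value leaves the register untouched, as in the code above.
         */
        static u16 apply_ms_setting(u16 phy_1000t_ctrl, e1000_ms_type ms)
        {
                switch (ms) {
                case e1000_ms_force_master:
                        return phy_1000t_ctrl | CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE;
                case e1000_ms_force_slave:
                        return (phy_1000t_ctrl | CR_1000T_MS_ENABLE) & ~CR_1000T_MS_VALUE;
                case e1000_ms_auto:
                        return phy_1000t_ctrl & ~CR_1000T_MS_ENABLE;
                default:
                        return phy_1000t_ctrl;
                }
        }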
-/********************************************************************
-* Copper link setup for e1000_phy_m88 series.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
+/**
+ * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series.
+ * @hw: Struct containing variables accessed by shared code
+ */
static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_mgp_setup");
-
- if (hw->phy_reset_disable)
- return E1000_SUCCESS;
-
- /* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
-
- /* Options:
- * MDI/MDI-X = 0 (default)
- * 0 - Auto for all speeds
- * 1 - MDI mode
- * 2 - MDI-X mode
- * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
- */
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
-
- switch (hw->mdix) {
- case 1:
- phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
- break;
- case 2:
- phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
- break;
- case 3:
- phy_data |= M88E1000_PSCR_AUTO_X_1000T;
- break;
- case 0:
- default:
- phy_data |= M88E1000_PSCR_AUTO_X_MODE;
- break;
- }
-
- /* Options:
- * disable_polarity_correction = 0 (default)
- * Automatic Correction for Reversed Cable Polarity
- * 0 - Disabled
- * 1 - Enabled
- */
- phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (hw->disable_polarity_correction == 1)
- phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- if (hw->phy_revision < M88E1011_I_REV_4) {
- /* Force TX_CLK in the Extended PHY Specific Control Register
- * to 25MHz clock.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_EPSCR_TX_CLK_25;
-
- if ((hw->phy_revision == E1000_REVISION_2) &&
- (hw->phy_id == M88E1111_I_PHY_ID)) {
- /* Vidalia Phy, set the downshift counter to 5x */
- phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
- phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
- ret_val = e1000_write_phy_reg(hw,
- M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
- } else {
- /* Configure Master and Slave downshift values */
- phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
- phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
- M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
- ret_val = e1000_write_phy_reg(hw,
- M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
- }
- }
-
- /* SW Reset the PHY so all changes take effect */
- ret_val = e1000_phy_reset(hw);
- if (ret_val) {
- DEBUGOUT("Error Resetting the PHY\n");
- return ret_val;
- }
-
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+ if (hw->phy_reset_disable)
+ return E1000_SUCCESS;
+
+ /* Enable CRS on TX. This must be set for half-duplex operation. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+ /* Options:
+ * MDI/MDI-X = 0 (default)
+ * 0 - Auto for all speeds
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+ switch (hw->mdix) {
+ case 1:
+ phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+ break;
+ case 2:
+ phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+ break;
+ case 3:
+ phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+ break;
+ case 0:
+ default:
+ phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+ break;
+ }
+
+ /* Options:
+ * disable_polarity_correction = 0 (default)
+ * Automatic Correction for Reversed Cable Polarity
+ * 0 - Disabled
+ * 1 - Enabled
+ */
+ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+ if (hw->disable_polarity_correction == 1)
+ phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_revision < M88E1011_I_REV_4) {
+ /* Force TX_CLK in the Extended PHY Specific Control Register
+ * to 25MHz clock.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+ if ((hw->phy_revision == E1000_REVISION_2) &&
+ (hw->phy_id == M88E1111_I_PHY_ID)) {
+ /* Vidalia Phy, set the downshift counter to 5x */
+ phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+ phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Configure Master and Slave downshift values */
+ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+ ret_val = e1000_write_phy_reg(hw,
+ M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ /* SW Reset the PHY so all changes take effect */
+ ret_val = e1000_phy_reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting the PHY\n");
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
}
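
The hw->mdix values used in the switch above map directly onto the M88E1000 PHY-specific control crossover field. A minimal sketch of that mapping, assuming the M88E1000_PSCR_* definitions from e1000_hw.h; the helper name is hypothetical:

        /* Hypothetical helper: translate the hw->mdix setting (0 = auto for all
         * speeds, 1 = MDI, 2 = MDI-X, 3 = auto for 1000Base-T only) into the
         * M88E1000 PSCR crossover bits OR'd into the register above.
         */
        static u16 m88_mdix_bits(u16 mdix)
        {
                switch (mdix) {
                case 1:
                        return M88E1000_PSCR_MDI_MANUAL_MODE;
                case 2:
                        return M88E1000_PSCR_MDIX_MANUAL_MODE;
                case 3:
                        return M88E1000_PSCR_AUTO_X_1000T;
                case 0:
                default:
                        return M88E1000_PSCR_AUTO_X_MODE;
                }
        }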
-/********************************************************************
-* Setup auto-negotiation and flow control advertisements,
-* and then perform auto-negotiation.
-*
-* hw - Struct containing variables accessed by shared code
-*********************************************************************/
+/**
+ * e1000_copper_link_autoneg - setup auto-neg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Setup auto-negotiation and flow control advertisements,
+ * and then perform auto-negotiation.
+ */
static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_copper_link_autoneg");
-
- /* Perform some bounds checking on the hw->autoneg_advertised
- * parameter. If this variable is zero, then set it to the default.
- */
- hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
-
- /* If autoneg_advertised is zero, we assume it was not defaulted
- * by the calling code so we set to advertise full capability.
- */
- if (hw->autoneg_advertised == 0)
- hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
-
- /* IFE phy only supports 10/100 */
- if (hw->phy_type == e1000_phy_ife)
- hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
-
- DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
- ret_val = e1000_phy_setup_autoneg(hw);
- if (ret_val) {
- DEBUGOUT("Error Setting up Auto-Negotiation\n");
- return ret_val;
- }
- DEBUGOUT("Restarting Auto-Neg\n");
-
- /* Restart auto-negotiation by setting the Auto Neg Enable bit and
- * the Auto Neg Restart bit in the PHY control register.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* Does the user want to wait for Auto-Neg to complete here, or
- * check at a later time (for example, callback routine).
- */
- if (hw->wait_autoneg_complete) {
- ret_val = e1000_wait_autoneg(hw);
- if (ret_val) {
- DEBUGOUT("Error while waiting for autoneg to complete\n");
- return ret_val;
- }
- }
-
- hw->get_link_status = true;
-
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the hw->autoneg_advertised
+ * parameter. If this variable is zero, then set it to the default.
+ */
+ hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (hw->autoneg_advertised == 0)
+ hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
+ }
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, callback routine).
+ */
+ if (hw->wait_autoneg_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT
+ ("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->get_link_status = true;
+
+ return E1000_SUCCESS;
}
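
At the MII level, the restart performed above is just a read-modify-write of the PHY control register with the auto-negotiation enable and restart bits set. A minimal condensed sketch, reusing the driver's PHY accessors; the helper name is hypothetical:

        /* Hypothetical condensed form of the restart sequence above. */
        static s32 restart_autoneg(struct e1000_hw *hw)
        {
                u16 phy_ctrl;
                s32 ret_val;

                ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl);
                if (ret_val)
                        return ret_val;

                phy_ctrl |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
                return e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
        }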
-/******************************************************************************
-* Config the MAC and the PHY after link is up.
-* 1) Set up the MAC to the current PHY speed/duplex
-* if we are on 82543. If we
-* are on newer silicon, we only need to configure
-* collision distance in the Transmit Control Register.
-* 2) Set up flow control on the MAC to that established with
-* the link partner.
-* 3) Config DSP to improve Gigabit link quality for some PHY revisions.
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_copper_link_postconfig - post link setup
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Config the MAC and the PHY after link is up.
+ * 1) Set up the MAC to the current PHY speed/duplex if we are on
+ * 82543. If we are on newer silicon, we only need to configure
+ * collision distance in the Transmit Control Register.
+ * 2) Set up flow control on the MAC to that established with
+ * the link partner.
+ * 3) Config DSP to improve Gigabit link quality for some PHY revisions.
+ */
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
- s32 ret_val;
- DEBUGFUNC("e1000_copper_link_postconfig");
-
- if (hw->mac_type >= e1000_82544) {
- e1000_config_collision_dist(hw);
- } else {
- ret_val = e1000_config_mac_to_phy(hw);
- if (ret_val) {
- DEBUGOUT("Error configuring MAC to PHY settings\n");
- return ret_val;
- }
- }
- ret_val = e1000_config_fc_after_link_up(hw);
- if (ret_val) {
- DEBUGOUT("Error Configuring Flow Control\n");
- return ret_val;
- }
-
- /* Config DSP to improve Giga link quality */
- if (hw->phy_type == e1000_phy_igp) {
- ret_val = e1000_config_dsp_after_link_change(hw, true);
- if (ret_val) {
- DEBUGOUT("Error Configuring DSP after link up\n");
- return ret_val;
- }
- }
-
- return E1000_SUCCESS;
+ s32 ret_val;
+ DEBUGFUNC("e1000_copper_link_postconfig");
+
+ if (hw->mac_type >= e1000_82544) {
+ e1000_config_collision_dist(hw);
+ } else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Configuring Flow Control\n");
+ return ret_val;
+ }
+
+ /* Config DSP to improve Giga link quality */
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_config_dsp_after_link_change(hw, true);
+ if (ret_val) {
+ DEBUGOUT("Error Configuring DSP after link up\n");
+ return ret_val;
+ }
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
-* Detects which PHY is present and setup the speed and duplex
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_setup_copper_link - phy/speed/duplex setting
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Detects which PHY is present and sets up the speed and duplex
+ */
static s32 e1000_setup_copper_link(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 i;
- u16 phy_data;
- u16 reg_data = 0;
-
- DEBUGFUNC("e1000_setup_copper_link");
-
- switch (hw->mac_type) {
- case e1000_80003es2lan:
- case e1000_ich8lan:
- /* Set the mac to wait the maximum time between each
- * iteration and increase the max iterations when
- * polling the phy; this fixes erroneous timeouts at 10Mbps. */
- ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
- if (ret_val)
- return ret_val;
- reg_data |= 0x3F;
- ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
- if (ret_val)
- return ret_val;
- default:
- break;
- }
-
- /* Check if it is a valid PHY and set PHY mode if necessary. */
- ret_val = e1000_copper_link_preconfig(hw);
- if (ret_val)
- return ret_val;
-
- switch (hw->mac_type) {
- case e1000_80003es2lan:
- /* Kumeran registers are written-only */
- reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
- reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
- ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
- reg_data);
- if (ret_val)
- return ret_val;
- break;
- default:
- break;
- }
-
- if (hw->phy_type == e1000_phy_igp ||
- hw->phy_type == e1000_phy_igp_3 ||
- hw->phy_type == e1000_phy_igp_2) {
- ret_val = e1000_copper_link_igp_setup(hw);
- if (ret_val)
- return ret_val;
- } else if (hw->phy_type == e1000_phy_m88) {
- ret_val = e1000_copper_link_mgp_setup(hw);
- if (ret_val)
- return ret_val;
- } else if (hw->phy_type == e1000_phy_gg82563) {
- ret_val = e1000_copper_link_ggp_setup(hw);
- if (ret_val)
- return ret_val;
- }
-
- if (hw->autoneg) {
- /* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
- ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val)
- return ret_val;
- } else {
- /* PHY will be set to 10H, 10F, 100H,or 100F
- * depending on value from forced_speed_duplex. */
- DEBUGOUT("Forcing speed and duplex\n");
- ret_val = e1000_phy_force_speed_duplex(hw);
- if (ret_val) {
- DEBUGOUT("Error Forcing Speed and Duplex\n");
- return ret_val;
- }
- }
-
- /* Check link status. Wait up to 100 microseconds for link to become
- * valid.
- */
- for (i = 0; i < 10; i++) {
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- if (phy_data & MII_SR_LINK_STATUS) {
- /* Config the MAC and PHY after link is up */
- ret_val = e1000_copper_link_postconfig(hw);
- if (ret_val)
- return ret_val;
-
- DEBUGOUT("Valid link established!!!\n");
- return E1000_SUCCESS;
- }
- udelay(10);
- }
-
- DEBUGOUT("Unable to establish link!!!\n");
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_setup_copper_link");
+
+ /* Check if it is a valid PHY and set PHY mode if necessary. */
+ ret_val = e1000_copper_link_preconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_copper_link_igp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_copper_link_mgp_setup(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (hw->autoneg) {
+ /* Setup autoneg and flow control advertisement
+ * and perform autonegotiation */
+ ret_val = e1000_copper_link_autoneg(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* PHY will be set to 10H, 10F, 100H, or 100F
+ * depending on value from forced_speed_duplex. */
+ DEBUGOUT("Forcing speed and duplex\n");
+ ret_val = e1000_phy_force_speed_duplex(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
+ }
+ }
+
+ /* Check link status. Wait up to 100 microseconds for link to become
+ * valid.
+ */
+ for (i = 0; i < 10; i++) {
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ /* Config the MAC and PHY after link is up */
+ ret_val = e1000_copper_link_postconfig(hw);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT("Valid link established!!!\n");
+ return E1000_SUCCESS;
+ }
+ udelay(10);
+ }
+
+ DEBUGOUT("Unable to establish link!!!\n");
+ return E1000_SUCCESS;
}
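
One detail worth noting in the link-status poll above: PHY_STATUS is read twice per iteration because the MII Link Status bit is latched low, so the first read returns (and clears) any stale link-down indication while the second read reflects the current state. A minimal sketch of that idiom, reusing e1000_read_phy_reg() and MII_SR_LINK_STATUS; the helper name is hypothetical:

        /* Hypothetical helper: returns true when the PHY currently reports link.
         * The first read clears a latched link-down event; the second read gives
         * the live link state.
         */
        static bool phy_has_link(struct e1000_hw *hw)
        {
                u16 phy_data = 0;

                if (e1000_read_phy_reg(hw, PHY_STATUS, &phy_data))
                        return false;
                if (e1000_read_phy_reg(hw, PHY_STATUS, &phy_data))
                        return false;

                return (phy_data & MII_SR_LINK_STATUS) != 0;
        }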
-/******************************************************************************
-* Configure the MAC-to-PHY interface for 10/100Mbps
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
+/**
+ * e1000_phy_setup_autoneg - phy settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Configures PHY autoneg and flow control advertisement settings
+ */
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
- u32 tipg;
- u16 reg_data;
+ s32 ret_val;
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
- DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+ DEBUGFUNC("e1000_phy_setup_autoneg");
- reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
- ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
- if (ret_val)
- return ret_val;
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
- /* Configure Transmit Inter-Packet Gap */
- tipg = er32(TIPG);
- tipg &= ~E1000_TIPG_IPGT_MASK;
- tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
- ew32(TIPG, tipg);
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ /* Need to parse both autoneg_advertised and fc and set up
+ * the appropriate PHY registers. First we will parse for
+ * autoneg_advertised software override. Since we can advertise
+ * a plethora of combinations, we need to check each bit
+ * individually.
+ */
- if (ret_val)
- return ret_val;
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
- if (duplex == HALF_DUPLEX)
- reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
- else
- reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+ /* Do we want to advertise 10 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+ DEBUGOUT("Advertise 10mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ }
- return ret_val;
-}
+ /* Do we want to advertise 10 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+ DEBUGOUT("Advertise 10mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ }
-static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
-{
- s32 ret_val = E1000_SUCCESS;
- u16 reg_data;
- u32 tipg;
+ /* Do we want to advertise 100 Mb Half Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+ DEBUGOUT("Advertise 100mb Half duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ }
- DEBUGFUNC("e1000_configure_kmrn_for_1000");
+ /* Do we want to advertise 100 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+ DEBUGOUT("Advertise 100mb Full duplex\n");
+ mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ }
- reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
- ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
- if (ret_val)
- return ret_val;
+ /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+ if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+ DEBUGOUT
+ ("Advertise 1000mb Half duplex requested, request denied!\n");
+ }
- /* Configure Transmit Inter-Packet Gap */
- tipg = er32(TIPG);
- tipg &= ~E1000_TIPG_IPGT_MASK;
- tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
- ew32(TIPG, tipg);
+ /* Do we want to advertise 1000 Mb Full Duplex? */
+ if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
+ mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ }
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ /* Check for a software override of the flow control settings, and
+ * setup the PHY advertisement registers accordingly. If
+ * auto-negotiation is enabled, then software will have to set the
+ * "PAUSE" bits to the correct value in the Auto-Negotiation
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * other: No software override. The flow control configuration
+ * in the EEPROM is used.
+ */
+ switch (hw->fc) {
+ case E1000_FC_NONE: /* 0 */
+ /* Flow control (RX & TX) is completely disabled by a
+ * software over-ride.
+ */
+ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_RX_PAUSE: /* 1 */
+ /* RX Flow control is enabled, and TX Flow control is
+ * disabled, by a software over-ride.
+ */
+ /* Since there really isn't a way to advertise that we are
+ * capable of RX Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric RX PAUSE. Later
+ * (in e1000_config_fc_after_link_up) we will disable the
+ * hw's ability to send PAUSE frames.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ case E1000_FC_TX_PAUSE: /* 2 */
+ /* TX Flow control is enabled, and RX Flow control is
+ * disabled, by a software over-ride.
+ */
+ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+ mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ break;
+ case E1000_FC_FULL: /* 3 */
+ /* Flow control (both RX and TX) is enabled by a software
+ * over-ride.
+ */
+ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
- if (ret_val)
- return ret_val;
+ ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
- reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- return ret_val;
-}
+ ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
-/******************************************************************************
-* Configures PHY autoneg and flow control advertisement settings
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 mii_autoneg_adv_reg;
- u16 mii_1000t_ctrl_reg;
-
- DEBUGFUNC("e1000_phy_setup_autoneg");
-
- /* Read the MII Auto-Neg Advertisement Register (Address 4). */
- ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
- if (ret_val)
- return ret_val;
-
- if (hw->phy_type != e1000_phy_ife) {
- /* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
- if (ret_val)
- return ret_val;
- } else
- mii_1000t_ctrl_reg=0;
-
- /* Need to parse both autoneg_advertised and fc and set up
- * the appropriate PHY registers. First we will parse for
- * autoneg_advertised software override. Since we can advertise
- * a plethora of combinations, we need to check each bit
- * individually.
- */
-
- /* First we clear all the 10/100 mb speed bits in the Auto-Neg
- * Advertisement Register (Address 4) and the 1000 mb speed bits in
- * the 1000Base-T Control Register (Address 9).
- */
- mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
- mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
-
- DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
-
- /* Do we want to advertise 10 Mb Half Duplex? */
- if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
- DEBUGOUT("Advertise 10mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
- }
-
- /* Do we want to advertise 10 Mb Full Duplex? */
- if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
- DEBUGOUT("Advertise 10mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
- }
-
- /* Do we want to advertise 100 Mb Half Duplex? */
- if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
- DEBUGOUT("Advertise 100mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
- }
-
- /* Do we want to advertise 100 Mb Full Duplex? */
- if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
- DEBUGOUT("Advertise 100mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
- }
-
- /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
- if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
- DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
- }
-
- /* Do we want to advertise 1000 Mb Full Duplex? */
- if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
- DEBUGOUT("Advertise 1000mb Full duplex\n");
- mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
- if (hw->phy_type == e1000_phy_ife) {
- DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
- }
- }
-
- /* Check for a software override of the flow control settings, and
- * setup the PHY advertisement registers accordingly. If
- * auto-negotiation is enabled, then software will have to set the
- * "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
- *
- * The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames
- * but we do not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
- * other: No software override. The flow control configuration
- * in the EEPROM is used.
- */
- switch (hw->fc) {
- case E1000_FC_NONE: /* 0 */
- /* Flow control (RX & TX) is completely disabled by a
- * software over-ride.
- */
- mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
- break;
- case E1000_FC_RX_PAUSE: /* 1 */
- /* RX Flow control is enabled, and TX Flow control is
- * disabled, by a software over-ride.
- */
- /* Since there really isn't a way to advertise that we are
- * capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later
- * (in e1000_config_fc_after_link_up) we will disable the
- *hw's ability to send PAUSE frames.
- */
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
- break;
- case E1000_FC_TX_PAUSE: /* 2 */
- /* TX Flow control is enabled, and RX Flow control is
- * disabled, by a software over-ride.
- */
- mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
- mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
- break;
- case E1000_FC_FULL: /* 3 */
- /* Flow control (both RX and TX) is enabled by a software
- * over-ride.
- */
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- return -E1000_ERR_CONFIG;
- }
-
- ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
- if (ret_val)
- return ret_val;
-
- DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
-
- if (hw->phy_type != e1000_phy_ife) {
- ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
- if (ret_val)
- return ret_val;
- }
-
- return E1000_SUCCESS;
+ return E1000_SUCCESS;
}
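
As a concrete illustration of the register assembly above: advertising 100 Mb full duplex plus 1000 Mb full duplex with symmetric flow control OR's the 100TX-FD and pause bits into the MII advertisement register (PHY_AUTONEG_ADV) and the 1000T-FD bit into the 1000Base-T control register (PHY_1000T_CTRL). A sketch of just those bits, under the stated assumptions; the variable names are illustrative:

        /* Illustrative only: bits OR'd into the two advertisement registers for
         * autoneg_advertised = ADVERTISE_100_FULL | ADVERTISE_1000_FULL
         * with hw->fc = E1000_FC_FULL.
         */
        u16 adv_reg4 = NWAY_AR_100TX_FD_CAPS |  /* 100 Mb full duplex */
                       NWAY_AR_ASM_DIR |        /* asymmetric pause supported */
                       NWAY_AR_PAUSE;           /* symmetric pause supported */
        u16 adv_reg9 = CR_1000T_FD_CAPS;        /* 1000 Mb full duplex */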
-/******************************************************************************
-* Force PHY speed and duplex settings to hw->forced_speed_duplex
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_phy_force_speed_duplex - force link settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Force PHY speed and duplex settings to hw->forced_speed_duplex
+ */
static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
{
- u32 ctrl;
- s32 ret_val;
- u16 mii_ctrl_reg;
- u16 mii_status_reg;
- u16 phy_data;
- u16 i;
-
- DEBUGFUNC("e1000_phy_force_speed_duplex");
-
- /* Turn off Flow control if we are forcing speed and duplex. */
- hw->fc = E1000_FC_NONE;
-
- DEBUGOUT1("hw->fc = %d\n", hw->fc);
-
- /* Read the Device Control Register. */
- ctrl = er32(CTRL);
-
- /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
- ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- ctrl &= ~(DEVICE_SPEED_MASK);
-
- /* Clear the Auto Speed Detect Enable bit. */
- ctrl &= ~E1000_CTRL_ASDE;
-
- /* Read the MII Control Register. */
- ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
- if (ret_val)
- return ret_val;
-
- /* We need to disable autoneg in order to force link and duplex. */
-
- mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
-
- /* Are we forcing Full or Half Duplex? */
- if (hw->forced_speed_duplex == e1000_100_full ||
- hw->forced_speed_duplex == e1000_10_full) {
- /* We want to force full duplex so we SET the full duplex bits in the
- * Device and MII Control Registers.
- */
- ctrl |= E1000_CTRL_FD;
- mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
- } else {
- /* We want to force half duplex so we CLEAR the full duplex bits in
- * the Device and MII Control Registers.
- */
- ctrl &= ~E1000_CTRL_FD;
- mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
- DEBUGOUT("Half Duplex\n");
- }
-
- /* Are we forcing 100Mbps??? */
- if (hw->forced_speed_duplex == e1000_100_full ||
- hw->forced_speed_duplex == e1000_100_half) {
- /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
- ctrl |= E1000_CTRL_SPD_100;
- mii_ctrl_reg |= MII_CR_SPEED_100;
- mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
- DEBUGOUT("Forcing 100mb ");
- } else {
- /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
- ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- mii_ctrl_reg |= MII_CR_SPEED_10;
- mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- DEBUGOUT("Forcing 10mb ");
- }
-
- e1000_config_collision_dist(hw);
-
- /* Write the configured values back to the Device Control Reg. */
- ew32(CTRL, ctrl);
-
- if ((hw->phy_type == e1000_phy_m88) ||
- (hw->phy_type == e1000_phy_gg82563)) {
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed are duplex are forced.
- */
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
-
- /* Need to reset the PHY or these changes will be ignored */
- mii_ctrl_reg |= MII_CR_RESET;
-
- /* Disable MDI-X support for 10/100 */
- } else if (hw->phy_type == e1000_phy_ife) {
- ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IFE_PMC_AUTO_MDIX;
- phy_data &= ~IFE_PMC_FORCE_MDIX;
-
- ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
-
- } else {
- /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
- * forced whenever speed or duplex are forced.
- */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
- phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
- if (ret_val)
- return ret_val;
- }
-
- /* Write back the modified PHY MII control register. */
- ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
- if (ret_val)
- return ret_val;
-
- udelay(1);
-
- /* The wait_autoneg_complete flag may be a little misleading here.
- * Since we are forcing speed and duplex, Auto-Neg is not enabled.
- * But we do want to delay for a period while forcing only so we
- * don't generate false No Link messages. So we will wait here
- * only if the user has set wait_autoneg_complete to 1, which is
- * the default.
- */
- if (hw->wait_autoneg_complete) {
- /* We will wait for autoneg to complete. */
- DEBUGOUT("Waiting for forced speed/duplex link.\n");
- mii_status_reg = 0;
-
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
- for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- if (mii_status_reg & MII_SR_LINK_STATUS) break;
- msleep(100);
- }
- if ((i == 0) &&
- ((hw->phy_type == e1000_phy_m88) ||
- (hw->phy_type == e1000_phy_gg82563))) {
- /* We didn't get link. Reset the DSP and wait again for link. */
- ret_val = e1000_phy_reset_dsp(hw);
- if (ret_val) {
- DEBUGOUT("Error Resetting PHY DSP\n");
- return ret_val;
- }
- }
- /* This loop will early-out if the link condition has been met. */
- for (i = PHY_FORCE_TIME; i > 0; i--) {
- if (mii_status_reg & MII_SR_LINK_STATUS) break;
- msleep(100);
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
- }
- }
-
- if (hw->phy_type == e1000_phy_m88) {
- /* Because we reset the PHY above, we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock. This value
- * defaults back to a 2.5MHz clock when the PHY is reset.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_EPSCR_TX_CLK_25;
- ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- /* In addition, because of the s/w reset above, we need to enable CRS on
- * TX. This must be set for both full and half duplex operation.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
- (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
- hw->forced_speed_duplex == e1000_10_half)) {
- ret_val = e1000_polarity_reversal_workaround(hw);
- if (ret_val)
- return ret_val;
- }
- } else if (hw->phy_type == e1000_phy_gg82563) {
- /* The TX_CLK of the Extended PHY Specific Control Register defaults
- * to 2.5MHz on a reset. We need to re-force it back to 25MHz, if
- * we're not in a forced 10/duplex configuration. */
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
- if ((hw->forced_speed_duplex == e1000_10_full) ||
- (hw->forced_speed_duplex == e1000_10_half))
- phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
- else
- phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
-
- /* Also due to the reset, we need to enable CRS on Tx. */
- phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
-
- ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
- if (ret_val)
- return ret_val;
- }
- return E1000_SUCCESS;
+ u32 ctrl;
+ s32 ret_val;
+ u16 mii_ctrl_reg;
+ u16 mii_status_reg;
+ u16 phy_data;
+ u16 i;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+ /* Turn off Flow control if we are forcing speed and duplex. */
+ hw->fc = E1000_FC_NONE;
+
+ DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+ /* Read the Device Control Register. */
+ ctrl = er32(CTRL);
+
+ /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(DEVICE_SPEED_MASK);
+
+ /* Clear the Auto Speed Detect Enable bit. */
+ ctrl &= ~E1000_CTRL_ASDE;
+
+ /* Read the MII Control Register. */
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* We need to disable autoneg in order to force link and duplex. */
+
+ mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+ /* Are we forcing Full or Half Duplex? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_10_full) {
+ /* We want to force full duplex so we SET the full duplex bits in the
+ * Device and MII Control Registers.
+ */
+ ctrl |= E1000_CTRL_FD;
+ mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ /* We want to force half duplex so we CLEAR the full duplex bits in
+ * the Device and MII Control Registers.
+ */
+ ctrl &= ~E1000_CTRL_FD;
+ mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+ DEBUGOUT("Half Duplex\n");
+ }
+
+ /* Are we forcing 100Mbps??? */
+ if (hw->forced_speed_duplex == e1000_100_full ||
+ hw->forced_speed_duplex == e1000_100_half) {
+ /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+ ctrl |= E1000_CTRL_SPD_100;
+ mii_ctrl_reg |= MII_CR_SPEED_100;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ DEBUGOUT("Forcing 100mb ");
+ } else {
+ /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+ ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+ mii_ctrl_reg |= MII_CR_SPEED_10;
+ mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ DEBUGOUT("Forcing 10mb ");
+ }
+
+ e1000_config_collision_dist(hw);
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+
+ if (hw->phy_type == e1000_phy_m88) {
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+ /* Need to reset the PHY or these changes will be ignored */
+ mii_ctrl_reg |= MII_CR_RESET;
+
+ /* Disable MDI-X support for 10/100 */
+ } else {
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ * forced whenever speed or duplex are forced.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+ phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Write back the modified PHY MII control register. */
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ /* The wait_autoneg_complete flag may be a little misleading here.
+ * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+ * But we do want to delay for a period while forcing only so we
+ * don't generate false No Link messages. So we will wait here
+ * only if the user has set wait_autoneg_complete to 1, which is
+ * the default.
+ */
+ if (hw->wait_autoneg_complete) {
+ /* We will wait for autoneg to complete. */
+ DEBUGOUT("Waiting for forced speed/duplex link.\n");
+ mii_status_reg = 0;
+
+		/* Wait for the forced link to come up or the timeout to expire. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+			/* Read the MII Status Register and wait for the Link
+			 * Status bit to be set.
+			 */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+ }
+ if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
+ /* We didn't get link. Reset the DSP and wait again for link. */
+ ret_val = e1000_phy_reset_dsp(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Resetting PHY DSP\n");
+ return ret_val;
+ }
+ }
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ msleep(100);
+			/* Read the MII Status Register and wait for the Link
+			 * Status bit to be set.
+			 */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* Because we reset the PHY above, we need to re-force TX_CLK in the
+ * Extended PHY Specific Control Register to 25MHz clock. This value
+ * defaults back to a 2.5MHz clock when the PHY is reset.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_EPSCR_TX_CLK_25;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* In addition, because of the s/w reset above, we need to enable CRS on
+ * TX. This must be set for both full and half duplex operation.
+ */
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543)
+ && (!hw->autoneg)
+ && (hw->forced_speed_duplex == e1000_10_full
+ || hw->forced_speed_duplex == e1000_10_half)) {
+ ret_val = e1000_polarity_reversal_workaround(hw);
+ if (ret_val)
+ return ret_val;
+ }
+ }
+ return E1000_SUCCESS;
}
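
The forced-speed/duplex path above programs the PHY through the standard IEEE MII control (BMCR) register: autoneg is cleared and the speed/duplex bits are set by hand. As a rough illustration only (not part of this patch), the same bit selection can be expressed with the generic constants from <linux/mii.h>; the driver's private MII_CR_* macros are assumed to encode the same fields, and the helper name below is hypothetical.

#include <linux/types.h>
#include <linux/mii.h>

/* Hypothetical helper: build the BMCR value for a forced link mode.
 * Autoneg (BMCR_ANENABLE) is deliberately left cleared, mirroring the
 * mii_ctrl_reg manipulation in e1000_phy_force_speed_duplex() above.
 */
static u16 forced_mode_to_bmcr(bool speed_100, bool full_duplex)
{
	u16 bmcr = 0;

	if (speed_100)
		bmcr |= BMCR_SPEED100;	/* both speed bits clear means 10 Mb/s */
	if (full_duplex)
		bmcr |= BMCR_FULLDPLX;

	return bmcr;
}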
-/******************************************************************************
-* Sets the collision distance in the Transmit Control register
-*
-* hw - Struct containing variables accessed by shared code
-*
-* Link should have been established previously. Reads the speed and duplex
-* information from the Device Status register.
-******************************************************************************/
+/**
+ * e1000_config_collision_dist - set collision distance register
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Sets the collision distance in the Transmit Control register.
+ * Link should have been established previously. Reads the speed and duplex
+ * information from the Device Status register.
+ */
void e1000_config_collision_dist(struct e1000_hw *hw)
{
- u32 tctl, coll_dist;
+ u32 tctl, coll_dist;
- DEBUGFUNC("e1000_config_collision_dist");
+ DEBUGFUNC("e1000_config_collision_dist");
- if (hw->mac_type < e1000_82543)
- coll_dist = E1000_COLLISION_DISTANCE_82542;
- else
- coll_dist = E1000_COLLISION_DISTANCE;
+ if (hw->mac_type < e1000_82543)
+ coll_dist = E1000_COLLISION_DISTANCE_82542;
+ else
+ coll_dist = E1000_COLLISION_DISTANCE;
- tctl = er32(TCTL);
+ tctl = er32(TCTL);
- tctl &= ~E1000_TCTL_COLD;
- tctl |= coll_dist << E1000_COLD_SHIFT;
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= coll_dist << E1000_COLD_SHIFT;
- ew32(TCTL, tctl);
- E1000_WRITE_FLUSH();
+ ew32(TCTL, tctl);
+ E1000_WRITE_FLUSH();
}
-/******************************************************************************
-* Sets MAC speed and duplex settings to reflect the those in the PHY
-*
-* hw - Struct containing variables accessed by shared code
-* mii_reg - data to write to the MII control register
-*
-* The contents of the PHY register containing the needed information need to
-* be passed in.
-******************************************************************************/
+/**
+ * e1000_config_mac_to_phy - sync phy and mac settings
+ * @hw: Struct containing variables accessed by shared code
+ * @mii_reg: data to write to the MII control register
+ *
+ * Sets MAC speed and duplex settings to reflect those in the PHY.
+ * The contents of the PHY register containing the needed information need to
+ * be passed in.
+ */
static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
{
- u32 ctrl;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_config_mac_to_phy");
-
- /* 82544 or newer MAC, Auto Speed Detection takes care of
- * MAC speed/duplex configuration.*/
- if (hw->mac_type >= e1000_82544)
- return E1000_SUCCESS;
-
- /* Read the Device Control Register and set the bits to Force Speed
- * and Duplex.
- */
- ctrl = er32(CTRL);
- ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
-
- /* Set up duplex in the Device Control and Transmit Control
- * registers depending on negotiated values.
- */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- if (phy_data & M88E1000_PSSR_DPLX)
- ctrl |= E1000_CTRL_FD;
- else
- ctrl &= ~E1000_CTRL_FD;
-
- e1000_config_collision_dist(hw);
-
- /* Set up speed in the Device Control register depending on
- * negotiated values.
- */
- if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
- ctrl |= E1000_CTRL_SPD_1000;
- else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
- ctrl |= E1000_CTRL_SPD_100;
-
- /* Write the configured values back to the Device Control Reg. */
- ew32(CTRL, ctrl);
- return E1000_SUCCESS;
+ u32 ctrl;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_config_mac_to_phy");
+
+ /* 82544 or newer MAC, Auto Speed Detection takes care of
+ * MAC speed/duplex configuration.*/
+ if (hw->mac_type >= e1000_82544)
+ return E1000_SUCCESS;
+
+ /* Read the Device Control Register and set the bits to Force Speed
+ * and Duplex.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+ /* Set up duplex in the Device Control and Transmit Control
+ * registers depending on negotiated values.
+ */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & M88E1000_PSSR_DPLX)
+ ctrl |= E1000_CTRL_FD;
+ else
+ ctrl &= ~E1000_CTRL_FD;
+
+ e1000_config_collision_dist(hw);
+
+ /* Set up speed in the Device Control register depending on
+ * negotiated values.
+ */
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+ ctrl |= E1000_CTRL_SPD_1000;
+ else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+ ctrl |= E1000_CTRL_SPD_100;
+
+ /* Write the configured values back to the Device Control Reg. */
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Forces the MAC's flow control settings.
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_force_mac_fc - force flow control settings
+ * @hw: Struct containing variables accessed by shared code
*
+ * Forces the MAC's flow control settings.
* Sets the TFCE and RFCE bits in the device control register to reflect
* the adapter settings. TFCE and RFCE need to be explicitly set by
* software when a Copper PHY is used because autonegotiation is managed
* by the PHY rather than the MAC. Software must also configure these
* bits when link is forced on a fiber connection.
- *****************************************************************************/
+ */
s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
- u32 ctrl;
-
- DEBUGFUNC("e1000_force_mac_fc");
-
- /* Get the current configuration of the Device Control Register */
- ctrl = er32(CTRL);
-
- /* Because we didn't get link via the internal auto-negotiation
- * mechanism (we either forced link or we got link via PHY
- * auto-neg), we have to manually enable/disable transmit an
- * receive flow control.
- *
- * The "Case" statement below enables/disable flow control
- * according to the "hw->fc" parameter.
- *
- * The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause
- * frames but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
- * 3: Both Rx and TX flow control (symmetric) is enabled.
- * other: No other values should be possible at this point.
- */
-
- switch (hw->fc) {
- case E1000_FC_NONE:
- ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
- break;
- case E1000_FC_RX_PAUSE:
- ctrl &= (~E1000_CTRL_TFCE);
- ctrl |= E1000_CTRL_RFCE;
- break;
- case E1000_FC_TX_PAUSE:
- ctrl &= (~E1000_CTRL_RFCE);
- ctrl |= E1000_CTRL_TFCE;
- break;
- case E1000_FC_FULL:
- ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
- break;
- default:
- DEBUGOUT("Flow control param set incorrectly\n");
- return -E1000_ERR_CONFIG;
- }
-
- /* Disable TX Flow Control for 82542 (rev 2.0) */
- if (hw->mac_type == e1000_82542_rev2_0)
- ctrl &= (~E1000_CTRL_TFCE);
-
- ew32(CTRL, ctrl);
- return E1000_SUCCESS;
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_force_mac_fc");
+
+ /* Get the current configuration of the Device Control Register */
+ ctrl = er32(CTRL);
+
+ /* Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+	 * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+ * 3: Both Rx and TX flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+
+ switch (hw->fc) {
+ case E1000_FC_NONE:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case E1000_FC_RX_PAUSE:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case E1000_FC_TX_PAUSE:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case E1000_FC_FULL:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Disable TX Flow Control for 82542 (rev 2.0) */
+ if (hw->mac_type == e1000_82542_rev2_0)
+ ctrl &= (~E1000_CTRL_TFCE);
+
+ ew32(CTRL, ctrl);
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Configures flow control settings after link is established
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_config_fc_after_link_up - configure flow control after autoneg
+ * @hw: Struct containing variables accessed by shared code
*
+ * Configures flow control settings after link is established
* Should be called immediately after a valid link has been established.
* Forces MAC flow control settings if link was forced. When in MII/GMII mode
* and autonegotiation is enabled, the MAC flow control settings will be set
* based on the flow control negotiated by the PHY. In TBI mode, the TFCE
- * and RFCE bits will be automaticaly set to the negotiated flow control mode.
- *****************************************************************************/
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ */
static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 mii_status_reg;
- u16 mii_nway_adv_reg;
- u16 mii_nway_lp_ability_reg;
- u16 speed;
- u16 duplex;
-
- DEBUGFUNC("e1000_config_fc_after_link_up");
-
- /* Check for the case where we have fiber media and auto-neg failed
- * so we had to force link. In this case, we need to force the
- * configuration of the MAC to match the "fc" parameter.
- */
- if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
- ((hw->media_type == e1000_media_type_internal_serdes) &&
- (hw->autoneg_failed)) ||
- ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
- ret_val = e1000_force_mac_fc(hw);
- if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
- return ret_val;
- }
- }
-
- /* Check for the case where we have copper media and auto-neg is
- * enabled. In this case, we need to check and see if Auto-Neg
- * has completed, and if so, how the PHY and link partner has
- * flow control configured.
- */
- if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
- /* Read the MII Status Register and check to see if AutoNeg
- * has completed. We read this twice because this reg has
- * some "sticky" (latched) bits.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
- /* The AutoNeg process has completed, so we now need to
- * read both the Auto Negotiation Advertisement Register
- * (Address 4) and the Auto_Negotiation Base Page Ability
- * Register (Address 5) to determine how flow control was
- * negotiated.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
- &mii_nway_adv_reg);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
- &mii_nway_lp_ability_reg);
- if (ret_val)
- return ret_val;
-
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
- * NOTE: DC = Don't Care
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
- * 0 | 0 | DC | DC | E1000_FC_NONE
- * 0 | 1 | 0 | DC | E1000_FC_NONE
- * 0 | 1 | 1 | 0 | E1000_FC_NONE
- * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
- * 1 | 0 | 0 | DC | E1000_FC_NONE
- * 1 | DC | 1 | DC | E1000_FC_FULL
- * 1 | 1 | 0 | 0 | E1000_FC_NONE
- * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
- *
- */
- /* Are both PAUSE bits set to 1? If so, this implies
- * Symmetric Flow Control is enabled at both ends. The
- * ASM_DIR bits are irrelevant per the spec.
- *
- * For Symmetric Flow Control:
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | DC | 1 | DC | E1000_FC_FULL
- *
- */
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
- */
- if (hw->original_fc == E1000_FC_FULL) {
- hw->fc = E1000_FC_FULL;
- DEBUGOUT("Flow Control = FULL.\n");
- } else {
- hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- }
- }
- /* For receiving PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
- *
- */
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc = E1000_FC_TX_PAUSE;
- DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
- }
- /* For transmitting PAUSE frames ONLY.
- *
- * LOCAL DEVICE | LINK PARTNER
- * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
- * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
- *
- */
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
- hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- }
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
- */
- else if ((hw->original_fc == E1000_FC_NONE ||
- hw->original_fc == E1000_FC_TX_PAUSE) ||
- hw->fc_strict_ieee) {
- hw->fc = E1000_FC_NONE;
- DEBUGOUT("Flow Control = NONE.\n");
- } else {
- hw->fc = E1000_FC_RX_PAUSE;
- DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
- }
-
- /* Now we need to do one last check... If we auto-
- * negotiated to HALF DUPLEX, flow control should not be
- * enabled per IEEE 802.3 spec.
- */
- ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
- return ret_val;
- }
-
- if (duplex == HALF_DUPLEX)
- hw->fc = E1000_FC_NONE;
-
- /* Now we call a subroutine to actually force the MAC
- * controller to use the correct flow control settings.
- */
- ret_val = e1000_force_mac_fc(hw);
- if (ret_val) {
- DEBUGOUT("Error forcing flow control settings\n");
- return ret_val;
- }
- } else {
- DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
- }
- }
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 mii_nway_adv_reg;
+ u16 mii_nway_lp_ability_reg;
+ u16 speed;
+ u16 duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up");
+
+ /* Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
+ || ((hw->media_type == e1000_media_type_internal_serdes)
+ && (hw->autoneg_failed))
+ || ((hw->media_type == e1000_media_type_copper)
+ && (!hw->autoneg))) {
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+ /* Read the MII Status Register and check to see if AutoNeg
+ * has completed. We read this twice because this reg has
+ * some "sticky" (latched) bits.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement Register
+			 * (Address 4) and the Auto Negotiation Base Page Ability
+ * Register (Address 5) to determine how flow control was
+ * negotiated.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+ &mii_nway_adv_reg);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+ &mii_nway_lp_ability_reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (Address 4) and two bits in the Auto Negotiation Base
+ * Page Ability Register (Address 5) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | E1000_FC_NONE
+ * 0 | 1 | 0 | DC | E1000_FC_NONE
+ * 0 | 1 | 1 | 0 | E1000_FC_NONE
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ * 1 | 0 | 0 | DC | E1000_FC_NONE
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ * 1 | 1 | 0 | 0 | E1000_FC_NONE
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ /* Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | E1000_FC_FULL
+ *
+ */
+ if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+				/* Now we need to check if the user selected Rx PAUSE
+				 * frames only. In this case, we had to advertise FULL
+				 * flow control because we could not advertise Rx ONLY.
+				 * Hence, we must now check to see if we need to turn
+				 * OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->original_fc == E1000_FC_FULL) {
+ hw->fc = E1000_FC_FULL;
+ DEBUGOUT("Flow Control = FULL.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT
+ ("Flow Control = RX PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
+ *
+ */
+ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
+ {
+ hw->fc = E1000_FC_TX_PAUSE;
+ DEBUGOUT
+ ("Flow Control = TX PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
+ *
+ */
+ else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+ (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+ !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+ (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
+ {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT
+ ("Flow Control = RX PAUSE frames only.\n");
+ }
+ /* Per the IEEE spec, at this point flow control should be
+ * disabled. However, we want to consider that we could
+ * be connected to a legacy switch that doesn't advertise
+ * desired flow control, but can be forced on the link
+ * partner. So if we advertised no flow control, that is
+ * what we will resolve to. If we advertised some kind of
+ * receive capability (Rx Pause Only or Full Flow Control)
+ * and the link partner advertised none, we will configure
+ * ourselves to enable Rx Flow Control only. We can do
+ * this safely for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx, no
+ * harm done since we won't be receiving any PAUSE frames
+ * anyway. If the intent on the link partner was to have
+ * flow control enabled, then by us enabling RX only, we
+ * can at least receive pause frames and process them.
+ * This is a good idea because in most cases, since we are
+			 * predominantly a server NIC, more often than not we will
+			 * be asked to delay transmission of packets rather than
+			 * to ask our link partner to pause transmission of frames.
+ */
+ else if ((hw->original_fc == E1000_FC_NONE ||
+ hw->original_fc == E1000_FC_TX_PAUSE) ||
+ hw->fc_strict_ieee) {
+ hw->fc = E1000_FC_NONE;
+ DEBUGOUT("Flow Control = NONE.\n");
+ } else {
+ hw->fc = E1000_FC_RX_PAUSE;
+ DEBUGOUT
+ ("Flow Control = RX PAUSE frames only.\n");
+ }
+
+ /* Now we need to do one last check... If we auto-
+ * negotiated to HALF DUPLEX, flow control should not be
+ * enabled per IEEE 802.3 spec.
+ */
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT
+ ("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+
+ if (duplex == HALF_DUPLEX)
+ hw->fc = E1000_FC_NONE;
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ ret_val = e1000_force_mac_fc(hw);
+ if (ret_val) {
+ DEBUGOUT
+ ("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ } else {
+ DEBUGOUT
+ ("Copper PHY and Auto Neg has not completed.\n");
+ }
+ }
+ return E1000_SUCCESS;
}
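
The resolution logic above follows the PAUSE/ASM_DIR table from IEEE 802.3, plus the driver's deliberate fallback to Rx-only pause when a legacy link partner advertises nothing (the later half-duplex override is a separate step). A condensed sketch of that decision, using the E1000_FC_* codes this driver already defines; the helper name and bool parameters are illustrative, not part of the patch.

/* Illustrative only: 'desired_fc' plays the role of hw->original_fc and
 * 'strict_ieee' of hw->fc_strict_ieee; the PAUSE/ASM_DIR flags come from
 * the local advertisement and link partner ability registers.
 */
static u32 resolve_fc(bool loc_pause, bool loc_asm_dir,
		      bool lp_pause, bool lp_asm_dir,
		      u32 desired_fc, bool strict_ieee)
{
	if (loc_pause && lp_pause)	/* symmetric pause on both ends */
		return (desired_fc == E1000_FC_FULL) ?
			E1000_FC_FULL : E1000_FC_RX_PAUSE;

	if (!loc_pause && loc_asm_dir && lp_pause && lp_asm_dir)
		return E1000_FC_TX_PAUSE;

	if (loc_pause && loc_asm_dir && !lp_pause && lp_asm_dir)
		return E1000_FC_RX_PAUSE;

	/* IEEE resolves to "none" here; the driver keeps Rx pause if it
	 * asked for any receive capability and strict IEEE mode is off.
	 */
	if (!strict_ieee &&
	    (desired_fc == E1000_FC_RX_PAUSE || desired_fc == E1000_FC_FULL))
		return E1000_FC_RX_PAUSE;

	return E1000_FC_NONE;
}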
-/******************************************************************************
- * Checks to see if the link status of the hardware has changed.
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
*
- * hw - Struct containing variables accessed by shared code
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ */
+static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+ rxcw = er32(RXCW);
+
+ /*
+ * If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+ if (hw->autoneg_failed == 0) {
+ hw->autoneg_failed = 1;
+ goto out;
+ }
+ DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = er32(CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ ew32(CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ goto out;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /*
+ * If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+ ew32(TXCW, hw->txcw);
+ ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ hw->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+ /*
+ * If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - forced.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & er32(TXCW)) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ udelay(10);
+ rxcw = er32(RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ hw->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - autoneg "
+ "completed successfully.\n");
+ } else {
+ hw->serdes_has_link = false;
+					DEBUGOUT("SERDES: Link down - invalid "
+ "codewords detected in autoneg.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ hw->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ out:
+ return ret_val;
+}
+
+/**
+ * e1000_check_for_link
+ * @hw: Struct containing variables accessed by shared code
*
+ * Checks to see if the link status of the hardware has changed.
* Called by any function that needs to check the link status of the adapter.
- *****************************************************************************/
+ */
s32 e1000_check_for_link(struct e1000_hw *hw)
{
- u32 rxcw = 0;
- u32 ctrl;
- u32 status;
- u32 rctl;
- u32 icr;
- u32 signal = 0;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_check_for_link");
-
- ctrl = er32(CTRL);
- status = er32(STATUS);
-
- /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
- * set when the optics detect a signal. On older adapters, it will be
- * cleared when there is a signal. This applies to fiber media only.
- */
- if ((hw->media_type == e1000_media_type_fiber) ||
- (hw->media_type == e1000_media_type_internal_serdes)) {
- rxcw = er32(RXCW);
-
- if (hw->media_type == e1000_media_type_fiber) {
- signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
- if (status & E1000_STATUS_LU)
- hw->get_link_status = false;
- }
- }
-
- /* If we have a copper PHY then we only want to go out to the PHY
- * registers to see if Auto-Neg has completed and/or if our link
- * status has changed. The get_link_status flag will be set if we
- * receive a Link Status Change interrupt or we have Rx Sequence
- * Errors.
- */
- if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
- /* First we want to see if the MII Status Register reports
- * link. If so, then we want to get the current speed/duplex
- * of the PHY.
- * Read the register twice since the link bit is sticky.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- if (phy_data & MII_SR_LINK_STATUS) {
- hw->get_link_status = false;
- /* Check if there was DownShift, must be checked immediately after
- * link-up */
- e1000_check_downshift(hw);
-
- /* If we are on 82544 or 82543 silicon and speed/duplex
- * are forced to 10H or 10F, then we will implement the polarity
- * reversal workaround. We disable interrupts first, and upon
- * returning, place the devices interrupt state to its previous
- * value except for the link status change interrupt which will
- * happen due to the execution of this workaround.
- */
-
- if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
- (!hw->autoneg) &&
- (hw->forced_speed_duplex == e1000_10_full ||
- hw->forced_speed_duplex == e1000_10_half)) {
- ew32(IMC, 0xffffffff);
- ret_val = e1000_polarity_reversal_workaround(hw);
- icr = er32(ICR);
- ew32(ICS, (icr & ~E1000_ICS_LSC));
- ew32(IMS, IMS_ENABLE_MASK);
- }
-
- } else {
- /* No link detected */
- e1000_config_dsp_after_link_change(hw, false);
- return 0;
- }
-
- /* If we are forcing speed/duplex, then we simply return since
- * we have already determined whether we have link or not.
- */
- if (!hw->autoneg) return -E1000_ERR_CONFIG;
-
- /* optimize the dsp settings for the igp phy */
- e1000_config_dsp_after_link_change(hw, true);
-
- /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
- * have Si on board that is 82544 or newer, Auto
- * Speed Detection takes care of MAC speed/duplex
- * configuration. So we only need to configure Collision
- * Distance in the MAC. Otherwise, we need to force
- * speed/duplex on the MAC to the current PHY speed/duplex
- * settings.
- */
- if (hw->mac_type >= e1000_82544)
- e1000_config_collision_dist(hw);
- else {
- ret_val = e1000_config_mac_to_phy(hw);
- if (ret_val) {
- DEBUGOUT("Error configuring MAC to PHY settings\n");
- return ret_val;
- }
- }
-
- /* Configure Flow Control now that Auto-Neg has completed. First, we
- * need to restore the desired flow control settings because we may
- * have had to re-autoneg with a different link partner.
- */
- ret_val = e1000_config_fc_after_link_up(hw);
- if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
- return ret_val;
- }
-
- /* At this point we know that we are on copper and we have
- * auto-negotiated link. These are conditions for checking the link
- * partner capability register. We use the link speed to determine if
- * TBI compatibility needs to be turned on or off. If the link is not
- * at gigabit speed, then TBI compatibility is not needed. If we are
- * at gigabit speed, we turn on TBI compatibility.
- */
- if (hw->tbi_compatibility_en) {
- u16 speed, duplex;
- ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
- return ret_val;
- }
- if (speed != SPEED_1000) {
- /* If link speed is not set to gigabit speed, we do not need
- * to enable TBI compatibility.
- */
- if (hw->tbi_compatibility_on) {
- /* If we previously were in the mode, turn it off. */
- rctl = er32(RCTL);
- rctl &= ~E1000_RCTL_SBP;
- ew32(RCTL, rctl);
- hw->tbi_compatibility_on = false;
- }
- } else {
- /* If TBI compatibility is was previously off, turn it on. For
- * compatibility with a TBI link partner, we will store bad
- * packets. Some frames have an additional byte on the end and
- * will look like CRC errors to the hardware.
- */
- if (!hw->tbi_compatibility_on) {
- hw->tbi_compatibility_on = true;
- rctl = er32(RCTL);
- rctl |= E1000_RCTL_SBP;
- ew32(RCTL, rctl);
- }
- }
- }
- }
- /* If we don't have link (auto-negotiation failed or link partner cannot
- * auto-negotiate), the cable is plugged in (we have signal), and our
- * link partner is not trying to auto-negotiate with us (we are receiving
- * idles or data), we need to force link up. We also need to give
- * auto-negotiation time to complete, in case the cable was just plugged
- * in. The autoneg_failed flag does this.
- */
- else if ((((hw->media_type == e1000_media_type_fiber) &&
- ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
- (hw->media_type == e1000_media_type_internal_serdes)) &&
- (!(status & E1000_STATUS_LU)) &&
- (!(rxcw & E1000_RXCW_C))) {
- if (hw->autoneg_failed == 0) {
- hw->autoneg_failed = 1;
- return 0;
- }
- DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
-
- /* Disable auto-negotiation in the TXCW register */
- ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
-
- /* Force link-up and also force full-duplex. */
- ctrl = er32(CTRL);
- ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
- ew32(CTRL, ctrl);
-
- /* Configure Flow Control after forcing link up. */
- ret_val = e1000_config_fc_after_link_up(hw);
- if (ret_val) {
- DEBUGOUT("Error configuring flow control\n");
- return ret_val;
- }
- }
- /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
- * auto-negotiation in the TXCW register and disable forced link in the
- * Device Control register in an attempt to auto-negotiate with our link
- * partner.
- */
- else if (((hw->media_type == e1000_media_type_fiber) ||
- (hw->media_type == e1000_media_type_internal_serdes)) &&
- (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
- ew32(TXCW, hw->txcw);
- ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
-
- hw->serdes_link_down = false;
- }
- /* If we force link for non-auto-negotiation switch, check link status
- * based on MAC synchronization for internal serdes media type.
- */
- else if ((hw->media_type == e1000_media_type_internal_serdes) &&
- !(E1000_TXCW_ANE & er32(TXCW))) {
- /* SYNCH bit and IV bit are sticky. */
- udelay(10);
- if (E1000_RXCW_SYNCH & er32(RXCW)) {
- if (!(rxcw & E1000_RXCW_IV)) {
- hw->serdes_link_down = false;
- DEBUGOUT("SERDES: Link is up.\n");
- }
- } else {
- hw->serdes_link_down = true;
- DEBUGOUT("SERDES: Link is down.\n");
- }
- }
- if ((hw->media_type == e1000_media_type_internal_serdes) &&
- (E1000_TXCW_ANE & er32(TXCW))) {
- hw->serdes_link_down = !(E1000_STATUS_LU & er32(STATUS));
- }
- return E1000_SUCCESS;
+ u32 rxcw = 0;
+ u32 ctrl;
+ u32 status;
+ u32 rctl;
+ u32 icr;
+ u32 signal = 0;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_for_link");
+
+ ctrl = er32(CTRL);
+ status = er32(STATUS);
+
+ /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+ * set when the optics detect a signal. On older adapters, it will be
+ * cleared when there is a signal. This applies to fiber media only.
+ */
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes)) {
+ rxcw = er32(RXCW);
+
+ if (hw->media_type == e1000_media_type_fiber) {
+ signal =
+ (hw->mac_type >
+ e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+ if (status & E1000_STATUS_LU)
+ hw->get_link_status = false;
+ }
+ }
+
+ /* If we have a copper PHY then we only want to go out to the PHY
+ * registers to see if Auto-Neg has completed and/or if our link
+ * status has changed. The get_link_status flag will be set if we
+ * receive a Link Status Change interrupt or we have Rx Sequence
+ * Errors.
+ */
+ if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+ /* First we want to see if the MII Status Register reports
+ * link. If so, then we want to get the current speed/duplex
+ * of the PHY.
+ * Read the register twice since the link bit is sticky.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (phy_data & MII_SR_LINK_STATUS) {
+ hw->get_link_status = false;
+			/* Check if there was a downshift; this must be checked
+			 * immediately after link-up. */
+ e1000_check_downshift(hw);
+
+ /* If we are on 82544 or 82543 silicon and speed/duplex
+ * are forced to 10H or 10F, then we will implement the polarity
+ * reversal workaround. We disable interrupts first, and upon
+			 * returning, restore the device's interrupt state to its previous
+ * value except for the link status change interrupt which will
+ * happen due to the execution of this workaround.
+ */
+
+ if ((hw->mac_type == e1000_82544
+ || hw->mac_type == e1000_82543) && (!hw->autoneg)
+ && (hw->forced_speed_duplex == e1000_10_full
+ || hw->forced_speed_duplex == e1000_10_half)) {
+ ew32(IMC, 0xffffffff);
+ ret_val =
+ e1000_polarity_reversal_workaround(hw);
+ icr = er32(ICR);
+ ew32(ICS, (icr & ~E1000_ICS_LSC));
+ ew32(IMS, IMS_ENABLE_MASK);
+ }
+
+ } else {
+ /* No link detected */
+ e1000_config_dsp_after_link_change(hw, false);
+ return 0;
+ }
+
+ /* If we are forcing speed/duplex, then we simply return since
+ * we have already determined whether we have link or not.
+ */
+ if (!hw->autoneg)
+ return -E1000_ERR_CONFIG;
+
+ /* optimize the dsp settings for the igp phy */
+ e1000_config_dsp_after_link_change(hw, true);
+
+ /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
+ * have Si on board that is 82544 or newer, Auto
+ * Speed Detection takes care of MAC speed/duplex
+ * configuration. So we only need to configure Collision
+ * Distance in the MAC. Otherwise, we need to force
+ * speed/duplex on the MAC to the current PHY speed/duplex
+ * settings.
+ */
+ if (hw->mac_type >= e1000_82544)
+ e1000_config_collision_dist(hw);
+ else {
+ ret_val = e1000_config_mac_to_phy(hw);
+ if (ret_val) {
+ DEBUGOUT
+ ("Error configuring MAC to PHY settings\n");
+ return ret_val;
+ }
+ }
+
+ /* Configure Flow Control now that Auto-Neg has completed. First, we
+ * need to restore the desired flow control settings because we may
+ * have had to re-autoneg with a different link partner.
+ */
+ ret_val = e1000_config_fc_after_link_up(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+
+ /* At this point we know that we are on copper and we have
+ * auto-negotiated link. These are conditions for checking the link
+ * partner capability register. We use the link speed to determine if
+ * TBI compatibility needs to be turned on or off. If the link is not
+ * at gigabit speed, then TBI compatibility is not needed. If we are
+ * at gigabit speed, we turn on TBI compatibility.
+ */
+ if (hw->tbi_compatibility_en) {
+ u16 speed, duplex;
+ ret_val =
+ e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT
+ ("Error getting link speed and duplex\n");
+ return ret_val;
+ }
+ if (speed != SPEED_1000) {
+ /* If link speed is not set to gigabit speed, we do not need
+ * to enable TBI compatibility.
+ */
+ if (hw->tbi_compatibility_on) {
+ /* If we previously were in the mode, turn it off. */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ hw->tbi_compatibility_on = false;
+ }
+ } else {
+				/* If TBI compatibility was previously off, turn it on. For
+				 * compatibility with a TBI link partner, we will store bad
+				 * packets. Some frames have an additional byte on the end and
+				 * will look like CRC errors to the hardware.
+ */
+ if (!hw->tbi_compatibility_on) {
+ hw->tbi_compatibility_on = true;
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_SBP;
+ ew32(RCTL, rctl);
+ }
+ }
+ }
+ }
+
+ if ((hw->media_type == e1000_media_type_fiber) ||
+ (hw->media_type == e1000_media_type_internal_serdes))
+ e1000_check_for_serdes_link_generic(hw);
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
+/**
+ * e1000_get_speed_and_duplex
+ * @hw: Struct containing variables accessed by shared code
+ * @speed: Speed of the connection
+ * @duplex: Duplex setting of the connection
+ *
* Detects the current speed and duplex settings of the hardware.
- *
- * hw - Struct containing variables accessed by shared code
- * speed - Speed of the connection
- * duplex - Duplex setting of the connection
- *****************************************************************************/
+ */
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
{
- u32 status;
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_get_speed_and_duplex");
-
- if (hw->mac_type >= e1000_82543) {
- status = er32(STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
- *speed = SPEED_1000;
- DEBUGOUT("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
- *speed = SPEED_100;
- DEBUGOUT("100 Mbs, ");
- } else {
- *speed = SPEED_10;
- DEBUGOUT("10 Mbs, ");
- }
-
- if (status & E1000_STATUS_FD) {
- *duplex = FULL_DUPLEX;
- DEBUGOUT("Full Duplex\n");
- } else {
- *duplex = HALF_DUPLEX;
- DEBUGOUT(" Half Duplex\n");
- }
- } else {
- DEBUGOUT("1000 Mbs, Full Duplex\n");
- *speed = SPEED_1000;
- *duplex = FULL_DUPLEX;
- }
-
- /* IGP01 PHY may advertise full duplex operation after speed downgrade even
- * if it is operating at half duplex. Here we set the duplex settings to
- * match the duplex in the link partner's capabilities.
- */
- if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
- ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
- if (ret_val)
- return ret_val;
-
- if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
- *duplex = HALF_DUPLEX;
- else {
- ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
- if (ret_val)
- return ret_val;
- if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
- (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
- *duplex = HALF_DUPLEX;
- }
- }
-
- if ((hw->mac_type == e1000_80003es2lan) &&
- (hw->media_type == e1000_media_type_copper)) {
- if (*speed == SPEED_1000)
- ret_val = e1000_configure_kmrn_for_1000(hw);
- else
- ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
- if (ret_val)
- return ret_val;
- }
-
- if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
- ret_val = e1000_kumeran_lock_loss_workaround(hw);
- if (ret_val)
- return ret_val;
- }
-
- return E1000_SUCCESS;
+ u32 status;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_get_speed_and_duplex");
+
+ if (hw->mac_type >= e1000_82543) {
+ status = er32(STATUS);
+ if (status & E1000_STATUS_SPEED_1000) {
+ *speed = SPEED_1000;
+ DEBUGOUT("1000 Mbs, ");
+ } else if (status & E1000_STATUS_SPEED_100) {
+ *speed = SPEED_100;
+ DEBUGOUT("100 Mbs, ");
+ } else {
+ *speed = SPEED_10;
+ DEBUGOUT("10 Mbs, ");
+ }
+
+ if (status & E1000_STATUS_FD) {
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("Full Duplex\n");
+ } else {
+ *duplex = HALF_DUPLEX;
+ DEBUGOUT(" Half Duplex\n");
+ }
+ } else {
+ DEBUGOUT("1000 Mbs, Full Duplex\n");
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+ }
+
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+ * if it is operating at half duplex. Here we set the duplex settings to
+ * match the duplex in the link partner's capabilities.
+ */
+ if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+ ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+ *duplex = HALF_DUPLEX;
+ else {
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if ((*speed == SPEED_100
+ && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
+ || (*speed == SPEED_10
+ && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+ *duplex = HALF_DUPLEX;
+ }
+ }
+
+ return E1000_SUCCESS;
}
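
A minimal usage sketch of the routine above (hypothetical caller, not part of the patch), reporting the resolved link parameters with the DEBUGOUT helpers already used in this file:

static void example_report_link(struct e1000_hw *hw)
{
	u16 speed, duplex;

	if (e1000_get_speed_and_duplex(hw, &speed, &duplex) != E1000_SUCCESS)
		return;

	DEBUGOUT1("Resolved link speed: %d Mb/s\n", speed);
	if (duplex == FULL_DUPLEX)
		DEBUGOUT("Resolved duplex: full\n");
	else
		DEBUGOUT("Resolved duplex: half\n");
}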
-/******************************************************************************
-* Blocks until autoneg completes or times out (~4.5 seconds)
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_wait_autoneg
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Blocks until autoneg completes or times out (~4.5 seconds)
+ */
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 i;
- u16 phy_data;
-
- DEBUGFUNC("e1000_wait_autoneg");
- DEBUGOUT("Waiting for Auto-Neg to complete.\n");
-
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
- for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg
- * Complete bit to be set.
- */
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
- if (phy_data & MII_SR_AUTONEG_COMPLETE) {
- return E1000_SUCCESS;
- }
- msleep(100);
- }
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 i;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_wait_autoneg");
+ DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+ /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
+ */
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+ if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+ return E1000_SUCCESS;
+ }
+ msleep(100);
+ }
+ return E1000_SUCCESS;
}
-/******************************************************************************
-* Raises the Management Data Clock
-*
-* hw - Struct containing variables accessed by shared code
-* ctrl - Device control register's current value
-******************************************************************************/
+/**
+ * e1000_raise_mdi_clk - Raises the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Raise the clock input to the Management Data Clock (by setting the MDC
- * bit), and then delay 10 microseconds.
- */
- ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
- E1000_WRITE_FLUSH();
- udelay(10);
+ /* Raise the clock input to the Management Data Clock (by setting the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
}
-/******************************************************************************
-* Lowers the Management Data Clock
-*
-* hw - Struct containing variables accessed by shared code
-* ctrl - Device control register's current value
-******************************************************************************/
+/**
+ * e1000_lower_mdi_clk - Lowers the Management Data Clock
+ * @hw: Struct containing variables accessed by shared code
+ * @ctrl: Device control register's current value
+ */
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Lower the clock input to the Management Data Clock (by clearing the MDC
- * bit), and then delay 10 microseconds.
- */
- ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
- E1000_WRITE_FLUSH();
- udelay(10);
+ /* Lower the clock input to the Management Data Clock (by clearing the MDC
+ * bit), and then delay 10 microseconds.
+ */
+ ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
+ E1000_WRITE_FLUSH();
+ udelay(10);
}
-/******************************************************************************
-* Shifts data bits out to the PHY
-*
-* hw - Struct containing variables accessed by shared code
-* data - Data to send out to the PHY
-* count - Number of bits to shift out
-*
-* Bits are shifted out in MSB to LSB order.
-******************************************************************************/
+/**
+ * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY
+ * @hw: Struct containing variables accessed by shared code
+ * @data: Data to send out to the PHY
+ * @count: Number of bits to shift out
+ *
+ * Bits are shifted out in MSB to LSB order.
+ */
static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
{
- u32 ctrl;
- u32 mask;
-
- /* We need to shift "count" number of bits out to the PHY. So, the value
- * in the "data" parameter will be shifted out to the PHY one bit at a
- * time. In order to do this, "data" must be broken down into bits.
- */
- mask = 0x01;
- mask <<= (count - 1);
-
- ctrl = er32(CTRL);
-
- /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
- ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
-
- while (mask) {
- /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
- * then raising and lowering the Management Data Clock. A "0" is
- * shifted out to the PHY by setting the MDIO bit to "0" and then
- * raising and lowering the clock.
- */
- if (data & mask)
- ctrl |= E1000_CTRL_MDIO;
- else
- ctrl &= ~E1000_CTRL_MDIO;
-
- ew32(CTRL, ctrl);
- E1000_WRITE_FLUSH();
-
- udelay(10);
-
- e1000_raise_mdi_clk(hw, &ctrl);
- e1000_lower_mdi_clk(hw, &ctrl);
-
- mask = mask >> 1;
- }
-}
-
-/******************************************************************************
-* Shifts data bits in from the PHY
-*
-* hw - Struct containing variables accessed by shared code
-*
-* Bits are shifted in in MSB to LSB order.
-******************************************************************************/
-static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
-{
- u32 ctrl;
- u16 data = 0;
- u8 i;
-
- /* In order to read a register from the PHY, we need to shift in a total
- * of 18 bits from the PHY. The first two bit (turnaround) times are used
- * to avoid contention on the MDIO pin when a read operation is performed.
- * These two bits are ignored by us and thrown away. Bits are "shifted in"
- * by raising the input to the Management Data Clock (setting the MDC bit),
- * and then reading the value of the MDIO bit.
- */
- ctrl = er32(CTRL);
-
- /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
- ctrl &= ~E1000_CTRL_MDIO_DIR;
- ctrl &= ~E1000_CTRL_MDIO;
-
- ew32(CTRL, ctrl);
- E1000_WRITE_FLUSH();
-
- /* Raise and Lower the clock before reading in the data. This accounts for
- * the turnaround bits. The first clock occurred when we clocked out the
- * last bit of the Register Address.
- */
- e1000_raise_mdi_clk(hw, &ctrl);
- e1000_lower_mdi_clk(hw, &ctrl);
-
- for (data = 0, i = 0; i < 16; i++) {
- data = data << 1;
- e1000_raise_mdi_clk(hw, &ctrl);
- ctrl = er32(CTRL);
- /* Check to see if we shifted in a "1". */
- if (ctrl & E1000_CTRL_MDIO)
- data |= 1;
- e1000_lower_mdi_clk(hw, &ctrl);
- }
-
- e1000_raise_mdi_clk(hw, &ctrl);
- e1000_lower_mdi_clk(hw, &ctrl);
-
- return data;
-}
-
-static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
-{
- u32 swfw_sync = 0;
- u32 swmask = mask;
- u32 fwmask = mask << 16;
- s32 timeout = 200;
+ u32 ctrl;
+ u32 mask;
- DEBUGFUNC("e1000_swfw_sync_acquire");
-
- if (hw->swfwhw_semaphore_present)
- return e1000_get_software_flag(hw);
+ /* We need to shift "count" number of bits out to the PHY. So, the value
+ * in the "data" parameter will be shifted out to the PHY one bit at a
+ * time. In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01;
+ mask <<= (count - 1);
- if (!hw->swfw_sync_present)
- return e1000_get_hw_eeprom_semaphore(hw);
+ ctrl = er32(CTRL);
- while (timeout) {
- if (e1000_get_hw_eeprom_semaphore(hw))
- return -E1000_ERR_SWFW_SYNC;
+ /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
- swfw_sync = er32(SW_FW_SYNC);
- if (!(swfw_sync & (fwmask | swmask))) {
- break;
- }
+ while (mask) {
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+ * then raising and lowering the Management Data Clock. A "0" is
+ * shifted out to the PHY by setting the MDIO bit to "0" and then
+ * raising and lowering the clock.
+ */
+ if (data & mask)
+ ctrl |= E1000_CTRL_MDIO;
+ else
+ ctrl &= ~E1000_CTRL_MDIO;
- /* firmware currently using resource (fwmask) */
- /* or other software thread currently using resource (swmask) */
- e1000_put_hw_eeprom_semaphore(hw);
- mdelay(5);
- timeout--;
- }
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
- if (!timeout) {
- DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
- return -E1000_ERR_SWFW_SYNC;
- }
+ udelay(10);
- swfw_sync |= swmask;
- ew32(SW_FW_SYNC, swfw_sync);
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
- e1000_put_hw_eeprom_semaphore(hw);
- return E1000_SUCCESS;
+ mask = mask >> 1;
+ }
}
-static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
+/**
+ * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Bits are shifted in from the PHY in MSB to LSB order.
+ */
+static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
{
- u32 swfw_sync;
- u32 swmask = mask;
+ u32 ctrl;
+ u16 data = 0;
+ u8 i;
- DEBUGFUNC("e1000_swfw_sync_release");
+ /* In order to read a register from the PHY, we need to shift in a total
+ * of 18 bits from the PHY. The first two bit (turnaround) times are used
+ * to avoid contention on the MDIO pin when a read operation is performed.
+ * These two bits are ignored by us and thrown away. Bits are "shifted in"
+ * by raising the input to the Management Data Clock (setting the MDC bit),
+ * and then reading the value of the MDIO bit.
+ */
+ ctrl = er32(CTRL);
- if (hw->swfwhw_semaphore_present) {
- e1000_release_software_flag(hw);
- return;
- }
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ ctrl &= ~E1000_CTRL_MDIO_DIR;
+ ctrl &= ~E1000_CTRL_MDIO;
- if (!hw->swfw_sync_present) {
- e1000_put_hw_eeprom_semaphore(hw);
- return;
- }
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
- /* if (e1000_get_hw_eeprom_semaphore(hw))
- * return -E1000_ERR_SWFW_SYNC; */
- while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS);
- /* empty */
+ /* Raise and Lower the clock before reading in the data. This accounts for
+ * the turnaround bits. The first clock occurred when we clocked out the
+ * last bit of the Register Address.
+ */
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
+
+ for (data = 0, i = 0; i < 16; i++) {
+ data = data << 1;
+ e1000_raise_mdi_clk(hw, &ctrl);
+ ctrl = er32(CTRL);
+ /* Check to see if we shifted in a "1". */
+ if (ctrl & E1000_CTRL_MDIO)
+ data |= 1;
+ e1000_lower_mdi_clk(hw, &ctrl);
+ }
- swfw_sync = er32(SW_FW_SYNC);
- swfw_sync &= ~swmask;
- ew32(SW_FW_SYNC, swfw_sync);
+ e1000_raise_mdi_clk(hw, &ctrl);
+ e1000_lower_mdi_clk(hw, &ctrl);
- e1000_put_hw_eeprom_semaphore(hw);
+ return data;
}
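
Taken together, the two shift helpers above form a software MDIO master: bits go out MSB first by walking a single-bit mask, and a read clocks in 16 data bits once the turnaround is past. The standalone sketch below mirrors that pattern with simulated pin helpers; the gpio_* names and the canned PHY value are inventions for the example, and the turnaround is simplified to two explicit clock pulses.

/* mdio_bitbang_sketch.c - MSB-first shift-out/shift-in, modeled on the
 * loops above; compile with: cc mdio_bitbang_sketch.c */
#include <stdint.h>
#include <stdio.h>

static uint64_t wire;                  /* records bits driven on "MDIO" */
static int wire_bits;

static void gpio_drive_mdio(int bit) { wire = (wire << 1) | (bit & 1); wire_bits++; }
static void gpio_pulse_mdc(void)     { /* raise, then lower, the clock */ }
static int  gpio_sample_mdio(int i)  { return (0x796D >> (15 - i)) & 1; } /* canned data */

/* Shift "count" bits out, most significant bit first. */
static void shift_out_bits(uint32_t data, int count)
{
	uint32_t mask = 1u << (count - 1);

	while (mask) {
		gpio_drive_mdio((data & mask) != 0);
		gpio_pulse_mdc();
		mask >>= 1;
	}
}

/* Two turnaround clocks (ignored), then 16 data bits, MSB first. */
static uint16_t shift_in_bits(void)
{
	uint16_t data = 0;
	int i;

	gpio_pulse_mdc();
	gpio_pulse_mdc();
	for (i = 0; i < 16; i++) {
		data <<= 1;
		gpio_pulse_mdc();
		data |= (uint16_t)gpio_sample_mdio(i);
	}
	return data;
}

int main(void)
{
	shift_out_bits(0xFFFFFFFF, 32);                               /* 32-bit preamble */
	shift_out_bits((0x1 << 12) | (0x2 << 10) | (1 << 5) | 2, 14); /* read frame */
	printf("bits driven: %d, value read: 0x%04x\n",
	       wire_bits, (unsigned)shift_in_bits());
	return 0;
}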
-/*****************************************************************************
-* Reads the value from a PHY register, if the value is on a specific non zero
-* page, sets the page first.
-* hw - Struct containing variables accessed by shared code
-* reg_addr - address of the PHY register to read
-******************************************************************************/
+
+/**
+ * e1000_read_phy_reg - read a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to read
+ * @phy_data: pointer to the value read from the PHY register
+ *
+ * Reads the value from a PHY register; if the register is on a specific
+ * non-zero page, sets the page first.
+ */
s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
- u32 ret_val;
- u16 swfw;
-
- DEBUGFUNC("e1000_read_phy_reg");
-
- if ((hw->mac_type == e1000_80003es2lan) &&
- (er32(STATUS) & E1000_STATUS_FUNC_1)) {
- swfw = E1000_SWFW_PHY1_SM;
- } else {
- swfw = E1000_SWFW_PHY0_SM;
- }
- if (e1000_swfw_sync_acquire(hw, swfw))
- return -E1000_ERR_SWFW_SYNC;
-
- if ((hw->phy_type == e1000_phy_igp ||
- hw->phy_type == e1000_phy_igp_3 ||
- hw->phy_type == e1000_phy_igp_2) &&
- (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
- ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
- (u16)reg_addr);
- if (ret_val) {
- e1000_swfw_sync_release(hw, swfw);
- return ret_val;
- }
- } else if (hw->phy_type == e1000_phy_gg82563) {
- if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
- (hw->mac_type == e1000_80003es2lan)) {
- /* Select Configuration Page */
- if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
- ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
- (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
- } else {
- /* Use Alternative Page Select register to access
- * registers 30 and 31
- */
- ret_val = e1000_write_phy_reg_ex(hw,
- GG82563_PHY_PAGE_SELECT_ALT,
- (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
- }
-
- if (ret_val) {
- e1000_swfw_sync_release(hw, swfw);
- return ret_val;
- }
- }
- }
-
- ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
- phy_data);
-
- e1000_swfw_sync_release(hw, swfw);
- return ret_val;
+ u32 ret_val;
+
+ DEBUGFUNC("e1000_read_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
}
static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u16 *phy_data)
{
- u32 i;
- u32 mdic = 0;
- const u32 phy_addr = 1;
-
- DEBUGFUNC("e1000_read_phy_reg_ex");
-
- if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
- return -E1000_ERR_PARAM;
- }
-
- if (hw->mac_type > e1000_82543) {
- /* Set up Op-code, Phy Address, and register address in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
- */
- mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_READ));
-
- ew32(MDIC, mdic);
-
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 64; i++) {
- udelay(50);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY) break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Read did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (mdic & E1000_MDIC_ERROR) {
- DEBUGOUT("MDI Error\n");
- return -E1000_ERR_PHY;
- }
- *phy_data = (u16)mdic;
- } else {
- /* We must first send a preamble through the MDIO pin to signal the
- * beginning of an MII instruction. This is done by sending 32
- * consecutive "1" bits.
- */
- e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
-
- /* Now combine the next few fields that are required for a read
- * operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine five different times. The format of
- * a MII read instruction consists of a shift out of 14 bits and is
- * defined as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
- * followed by a shift in of 18 bits. This first two bits shifted in
- * are TurnAround bits used to avoid contention on the MDIO pin when a
- * READ operation is performed. These two bits are thrown away
- * followed by a shift in of 16 bits which contains the desired data.
- */
- mdic = ((reg_addr) | (phy_addr << 5) |
- (PHY_OP_READ << 10) | (PHY_SOF << 12));
-
- e1000_shift_out_mdi_bits(hw, mdic, 14);
-
- /* Now that we've shifted out the read command to the MII, we need to
- * "shift in" the 16-bit value (18 total bits) of the requested PHY
- * register address.
- */
- *phy_data = e1000_shift_in_mdi_bits(hw);
- }
- return E1000_SUCCESS;
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = 1;
+
+ DEBUGFUNC("e1000_read_phy_reg_ex");
+
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, and register address in the MDI
+ * Control register. The MAC will take care of interfacing with the
+ * PHY to retrieve the desired data.
+ */
+ mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_READ));
+
+ ew32(MDIC, mdic);
+
+ /* Poll the ready bit to see if the MDI read completed */
+ for (i = 0; i < 64; i++) {
+ udelay(50);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (mdic & E1000_MDIC_ERROR) {
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ *phy_data = (u16) mdic;
+ } else {
+ /* We must first send a preamble through the MDIO pin to signal the
+ * beginning of an MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the next few fields that are required for a read
+ * operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine five different times. The format of
+ * a MII read instruction consists of a shift out of 14 bits and is
+ * defined as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+		 * followed by a shift in of 18 bits. The first two bits shifted in
+ * are TurnAround bits used to avoid contention on the MDIO pin when a
+ * READ operation is performed. These two bits are thrown away
+ * followed by a shift in of 16 bits which contains the desired data.
+ */
+ mdic = ((reg_addr) | (phy_addr << 5) |
+ (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+ e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+ /* Now that we've shifted out the read command to the MII, we need to
+ * "shift in" the 16-bit value (18 total bits) of the requested PHY
+		 * register.
+ */
+ *phy_data = e1000_shift_in_mdi_bits(hw);
+ }
+ return E1000_SUCCESS;
}
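
On MACs newer than the 82543 the function above lets the hardware serialize the frame: the driver packs the opcode, PHY address and register offset into MDIC, then polls the ready and error bits. The sketch below shows that compose-and-poll shape with the register simulated in software; the bit positions match the shifts used above, but the fake_mdic machinery and the returned value are assumptions made only so the example runs on its own.

/* mdic_poll_sketch.c - MDIC compose-and-poll, simulated in user space. */
#include <stdint.h>
#include <stdio.h>

#define MDIC_REG_SHIFT  16
#define MDIC_PHY_SHIFT  21
#define MDIC_OP_READ    (2u << 26)
#define MDIC_READY      (1u << 28)
#define MDIC_ERROR      (1u << 30)

static uint32_t fake_mdic;
static int polls_left;

static void write_mdic(uint32_t v) { fake_mdic = v; polls_left = 3; }
static uint32_t read_mdic(void)
{
	if (--polls_left <= 0)
		fake_mdic |= MDIC_READY | 0x0141;  /* pretend result from the PHY */
	return fake_mdic;
}

static int phy_read(uint32_t phy_addr, uint32_t reg_addr, uint16_t *val)
{
	uint32_t mdic = (reg_addr << MDIC_REG_SHIFT) |
			(phy_addr << MDIC_PHY_SHIFT) |
			MDIC_OP_READ;
	int i;

	write_mdic(mdic);

	/* Poll the ready bit, bounded so a dead PHY cannot hang us. */
	for (i = 0; i < 64; i++) {
		mdic = read_mdic();
		if (mdic & MDIC_READY)
			break;
	}
	if (!(mdic & MDIC_READY) || (mdic & MDIC_ERROR))
		return -1;

	*val = (uint16_t)mdic;                     /* data lives in the low 16 bits */
	return 0;
}

int main(void)
{
	uint16_t v;

	if (phy_read(1, 2, &v) == 0)
		printf("register 2 of PHY 1 reads 0x%04x\n", v);
	return 0;
}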
-/******************************************************************************
-* Writes a value to a PHY register
-*
-* hw - Struct containing variables accessed by shared code
-* reg_addr - address of the PHY register to write
-* data - data to write to the PHY
-******************************************************************************/
+/**
+ * e1000_write_phy_reg - write a phy register
+ * @hw: Struct containing variables accessed by shared code
+ * @reg_addr: address of the PHY register to write
+ * @phy_data: data to write to the PHY
+ *
+ * Writes a value to a PHY register
+ */
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
- u32 ret_val;
- u16 swfw;
-
- DEBUGFUNC("e1000_write_phy_reg");
-
- if ((hw->mac_type == e1000_80003es2lan) &&
- (er32(STATUS) & E1000_STATUS_FUNC_1)) {
- swfw = E1000_SWFW_PHY1_SM;
- } else {
- swfw = E1000_SWFW_PHY0_SM;
- }
- if (e1000_swfw_sync_acquire(hw, swfw))
- return -E1000_ERR_SWFW_SYNC;
-
- if ((hw->phy_type == e1000_phy_igp ||
- hw->phy_type == e1000_phy_igp_3 ||
- hw->phy_type == e1000_phy_igp_2) &&
- (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
- ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
- (u16)reg_addr);
- if (ret_val) {
- e1000_swfw_sync_release(hw, swfw);
- return ret_val;
- }
- } else if (hw->phy_type == e1000_phy_gg82563) {
- if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
- (hw->mac_type == e1000_80003es2lan)) {
- /* Select Configuration Page */
- if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
- ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
- (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
- } else {
- /* Use Alternative Page Select register to access
- * registers 30 and 31
- */
- ret_val = e1000_write_phy_reg_ex(hw,
- GG82563_PHY_PAGE_SELECT_ALT,
- (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
- }
-
- if (ret_val) {
- e1000_swfw_sync_release(hw, swfw);
- return ret_val;
- }
- }
- }
-
- ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
- phy_data);
-
- e1000_swfw_sync_release(hw, swfw);
- return ret_val;
+ u32 ret_val;
+
+ DEBUGFUNC("e1000_write_phy_reg");
+
+ if ((hw->phy_type == e1000_phy_igp) &&
+ (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+ ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+ (u16) reg_addr);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+ phy_data);
+
+ return ret_val;
}
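
The read and write wrappers above share one shape: a register address past the multi-page threshold triggers a page-select write, after which only the low five bits of the address go on the wire. Below is a hedged sketch of that wrapper; struct phy_ops, PAGE_SELECT_REG and the fake accessors are invented for the example and do not correspond to driver symbols.

/* phy_page_sketch.c - page-select wrapper around raw PHY accessors. */
#include <stdint.h>
#include <stdio.h>

#define MAX_PHY_REG_ADDRESS     0x1F
#define MAX_PHY_MULTI_PAGE_REG  0x0F
#define PAGE_SELECT_REG         0x1F   /* assumed page-select register */

struct phy_ops {
	int (*write_ex)(uint32_t reg, uint16_t val);
	int (*read_ex)(uint32_t reg, uint16_t *val);
};

static int fake_write(uint32_t reg, uint16_t val)
{
	printf("raw write reg 0x%02x = 0x%04x\n", (unsigned)reg, val);
	return 0;
}

static int fake_read(uint32_t reg, uint16_t *val)
{
	printf("raw read  reg 0x%02x\n", (unsigned)reg);
	*val = 0x1234;
	return 0;
}

static int phy_read_paged(const struct phy_ops *ops, uint32_t reg_addr, uint16_t *val)
{
	if (reg_addr > MAX_PHY_MULTI_PAGE_REG) {
		/* The page is encoded in the upper address bits; select it first. */
		int ret = ops->write_ex(PAGE_SELECT_REG, (uint16_t)reg_addr);
		if (ret)
			return ret;
	}
	return ops->read_ex(reg_addr & MAX_PHY_REG_ADDRESS, val);
}

int main(void)
{
	struct phy_ops ops = { fake_write, fake_read };
	uint16_t v;

	phy_read_paged(&ops, 0x12, &v);            /* above the threshold       */
	phy_read_paged(&ops, (4 << 5) | 0x11, &v); /* register 0x11 on "page" 4 */
	return 0;
}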
static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u16 phy_data)
{
- u32 i;
- u32 mdic = 0;
- const u32 phy_addr = 1;
-
- DEBUGFUNC("e1000_write_phy_reg_ex");
-
- if (reg_addr > MAX_PHY_REG_ADDRESS) {
- DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
- return -E1000_ERR_PARAM;
- }
-
- if (hw->mac_type > e1000_82543) {
- /* Set up Op-code, Phy Address, register address, and data intended
- * for the PHY register in the MDI Control register. The MAC will take
- * care of interfacing with the PHY to send the desired data.
- */
- mdic = (((u32)phy_data) |
- (reg_addr << E1000_MDIC_REG_SHIFT) |
- (phy_addr << E1000_MDIC_PHY_SHIFT) |
- (E1000_MDIC_OP_WRITE));
-
- ew32(MDIC, mdic);
-
- /* Poll the ready bit to see if the MDI read completed */
- for (i = 0; i < 641; i++) {
- udelay(5);
- mdic = er32(MDIC);
- if (mdic & E1000_MDIC_READY) break;
- }
- if (!(mdic & E1000_MDIC_READY)) {
- DEBUGOUT("MDI Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- } else {
- /* We'll need to use the SW defined pins to shift the write command
- * out to the PHY. We first send a preamble to the PHY to signal the
- * beginning of the MII instruction. This is done by sending 32
- * consecutive "1" bits.
- */
- e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
-
- /* Now combine the remaining required fields that will indicate a
- * write operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine for each field in the command. The
- * format of a MII write instruction is as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
- */
- mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
- (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
- mdic <<= 16;
- mdic |= (u32)phy_data;
-
- e1000_shift_out_mdi_bits(hw, mdic, 32);
- }
-
- return E1000_SUCCESS;
-}
+ u32 i;
+ u32 mdic = 0;
+ const u32 phy_addr = 1;
-static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data)
-{
- u32 reg_val;
- u16 swfw;
- DEBUGFUNC("e1000_read_kmrn_reg");
-
- if ((hw->mac_type == e1000_80003es2lan) &&
- (er32(STATUS) & E1000_STATUS_FUNC_1)) {
- swfw = E1000_SWFW_PHY1_SM;
- } else {
- swfw = E1000_SWFW_PHY0_SM;
- }
- if (e1000_swfw_sync_acquire(hw, swfw))
- return -E1000_ERR_SWFW_SYNC;
-
- /* Write register address */
- reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
- E1000_KUMCTRLSTA_OFFSET) |
- E1000_KUMCTRLSTA_REN;
- ew32(KUMCTRLSTA, reg_val);
- udelay(2);
-
- /* Read the data returned */
- reg_val = er32(KUMCTRLSTA);
- *data = (u16)reg_val;
-
- e1000_swfw_sync_release(hw, swfw);
- return E1000_SUCCESS;
-}
+ DEBUGFUNC("e1000_write_phy_reg_ex");
-static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data)
-{
- u32 reg_val;
- u16 swfw;
- DEBUGFUNC("e1000_write_kmrn_reg");
-
- if ((hw->mac_type == e1000_80003es2lan) &&
- (er32(STATUS) & E1000_STATUS_FUNC_1)) {
- swfw = E1000_SWFW_PHY1_SM;
- } else {
- swfw = E1000_SWFW_PHY0_SM;
- }
- if (e1000_swfw_sync_acquire(hw, swfw))
- return -E1000_ERR_SWFW_SYNC;
-
- reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
- E1000_KUMCTRLSTA_OFFSET) | data;
- ew32(KUMCTRLSTA, reg_val);
- udelay(2);
-
- e1000_swfw_sync_release(hw, swfw);
- return E1000_SUCCESS;
+ if (reg_addr > MAX_PHY_REG_ADDRESS) {
+ DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+ return -E1000_ERR_PARAM;
+ }
+
+ if (hw->mac_type > e1000_82543) {
+ /* Set up Op-code, Phy Address, register address, and data intended
+ * for the PHY register in the MDI Control register. The MAC will take
+ * care of interfacing with the PHY to send the desired data.
+ */
+ mdic = (((u32) phy_data) |
+ (reg_addr << E1000_MDIC_REG_SHIFT) |
+ (phy_addr << E1000_MDIC_PHY_SHIFT) |
+ (E1000_MDIC_OP_WRITE));
+
+ ew32(MDIC, mdic);
+
+		/* Poll the ready bit to see if the MDI write completed */
+ for (i = 0; i < 641; i++) {
+ udelay(5);
+ mdic = er32(MDIC);
+ if (mdic & E1000_MDIC_READY)
+ break;
+ }
+ if (!(mdic & E1000_MDIC_READY)) {
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ } else {
+ /* We'll need to use the SW defined pins to shift the write command
+ * out to the PHY. We first send a preamble to the PHY to signal the
+ * beginning of the MII instruction. This is done by sending 32
+ * consecutive "1" bits.
+ */
+ e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+ /* Now combine the remaining required fields that will indicate a
+ * write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the command. The
+ * format of a MII write instruction is as follows:
+ * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ */
+ mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+ (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+ mdic <<= 16;
+ mdic |= (u32) phy_data;
+
+ e1000_shift_out_mdi_bits(hw, mdic, 32);
+ }
+
+ return E1000_SUCCESS;
}
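
The legacy branch above packs a complete clause-22 write frame into 32 bits before bit-banging it out: start-of-frame, opcode, PHY address, register address, turnaround, then the 16-bit payload. A short sketch of that packing, mirroring the shifts used above (the field codes 01/01/10 are the standard SOF, write-opcode and turnaround values):

/* mii_write_frame_sketch.c - packing the 32-bit MII write frame. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_mii_write(uint32_t phy, uint32_t reg, uint16_t data)
{
	uint32_t hi = (1u << 14) |   /* start of frame: 01           */
		      (1u << 12) |   /* opcode: 01 = write           */
		      (phy << 7) |   /* 5-bit PHY address            */
		      (reg << 2) |   /* 5-bit register address       */
		      2u;            /* turnaround: 10               */

	return (hi << 16) | data;    /* low half carries the payload */
}

int main(void)
{
	/* Example: write 0x8000 (reset) to register 0 of the PHY at address 1. */
	printf("frame = 0x%08x, shifted out MSB first as 32 bits\n",
	       (unsigned)pack_mii_write(1, 0, 0x8000));
	return 0;
}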
-/******************************************************************************
-* Returns the PHY to the power-on reset state
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_phy_hw_reset - reset the phy, hardware style
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Returns the PHY to the power-on reset state
+ */
s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext;
- u32 led_ctrl;
- s32 ret_val;
- u16 swfw;
-
- DEBUGFUNC("e1000_phy_hw_reset");
-
- /* In the case of the phy reset being blocked, it's not an error, we
- * simply return success without performing the reset. */
- ret_val = e1000_check_phy_reset_block(hw);
- if (ret_val)
- return E1000_SUCCESS;
-
- DEBUGOUT("Resetting Phy...\n");
-
- if (hw->mac_type > e1000_82543) {
- if ((hw->mac_type == e1000_80003es2lan) &&
- (er32(STATUS) & E1000_STATUS_FUNC_1)) {
- swfw = E1000_SWFW_PHY1_SM;
- } else {
- swfw = E1000_SWFW_PHY0_SM;
- }
- if (e1000_swfw_sync_acquire(hw, swfw)) {
- DEBUGOUT("Unable to acquire swfw sync\n");
- return -E1000_ERR_SWFW_SYNC;
- }
- /* Read the device control register and assert the E1000_CTRL_PHY_RST
- * bit. Then, take it out of reset.
- * For pre-e1000_82571 hardware, we delay for 10ms between the assert
- * and deassert. For e1000_82571 hardware and later, we instead delay
- * for 50us between and 10ms after the deassertion.
- */
- ctrl = er32(CTRL);
- ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
- E1000_WRITE_FLUSH();
-
- if (hw->mac_type < e1000_82571)
- msleep(10);
- else
- udelay(100);
-
- ew32(CTRL, ctrl);
- E1000_WRITE_FLUSH();
-
- if (hw->mac_type >= e1000_82571)
- mdelay(10);
-
- e1000_swfw_sync_release(hw, swfw);
- } else {
- /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
- * bit to put the PHY into reset. Then, take it out of reset.
- */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
- ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
- ew32(CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH();
- msleep(10);
- ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
- ew32(CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH();
- }
- udelay(150);
-
- if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
- /* Configure activity LED after PHY reset */
- led_ctrl = er32(LEDCTL);
- led_ctrl &= IGP_ACTIVITY_LED_MASK;
- led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
- ew32(LEDCTL, led_ctrl);
- }
-
- /* Wait for FW to finish PHY configuration. */
- ret_val = e1000_get_phy_cfg_done(hw);
- if (ret_val != E1000_SUCCESS)
- return ret_val;
- e1000_release_software_semaphore(hw);
-
- if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
- ret_val = e1000_init_lcd_from_nvm(hw);
-
- return ret_val;
+ u32 ctrl, ctrl_ext;
+ u32 led_ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_hw_reset");
+
+ DEBUGOUT("Resetting Phy...\n");
+
+ if (hw->mac_type > e1000_82543) {
+ /* Read the device control register and assert the E1000_CTRL_PHY_RST
+ * bit. Then, take it out of reset.
+ * For e1000 hardware, we delay for 10ms between the assert
+ * and deassert.
+ */
+ ctrl = er32(CTRL);
+ ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH();
+
+ msleep(10);
+
+ ew32(CTRL, ctrl);
+ E1000_WRITE_FLUSH();
+
+ } else {
+ /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+ * bit to put the PHY into reset. Then, take it out of reset.
+ */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ msleep(10);
+ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+ ew32(CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH();
+ }
+ udelay(150);
+
+ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+ /* Configure activity LED after PHY reset */
+ led_ctrl = er32(LEDCTL);
+ led_ctrl &= IGP_ACTIVITY_LED_MASK;
+ led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+ ew32(LEDCTL, led_ctrl);
+ }
+
+ /* Wait for FW to finish PHY configuration. */
+ ret_val = e1000_get_phy_cfg_done(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+
+ return ret_val;
}
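
For the newer MACs the reset above is just a timed pulse on the PHY reset bit: assert, flush the posted write, hold for 10 ms, deassert, flush, then allow a short settling time before polling for configuration-done. The sketch below shows that sequencing with the register I/O faked and the bit position chosen for illustration; only the delays are taken from the function above.

/* phy_rst_sketch.c - assert/hold/deassert reset pulse, simulated. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CTRL_PHY_RST (1u << 31)   /* illustrative bit position */

static uint32_t ctrl_reg;

static uint32_t rd_ctrl(void)       { return ctrl_reg; }
static void     wr_ctrl(uint32_t v) { ctrl_reg = v; }
static void     write_flush(void)   { (void)rd_ctrl(); /* force the posted write out */ }

static void phy_hw_reset(void)
{
	uint32_t ctrl = rd_ctrl();

	wr_ctrl(ctrl | CTRL_PHY_RST);   /* assert PHY reset               */
	write_flush();
	usleep(10 * 1000);              /* hold it for 10 ms              */

	wr_ctrl(ctrl);                  /* deassert                       */
	write_flush();
	usleep(150);                    /* settle before touching the PHY */
}

int main(void)
{
	phy_hw_reset();
	printf("CTRL after the reset pulse: 0x%08x\n", (unsigned)rd_ctrl());
	return 0;
}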
-/******************************************************************************
-* Resets the PHY
-*
-* hw - Struct containing variables accessed by shared code
-*
-* Sets bit 15 of the MII Control register
-******************************************************************************/
+/**
+ * e1000_phy_reset - reset the phy to commit settings
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY
+ * Sets bit 15 of the MII Control register
+ */
s32 e1000_phy_reset(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_phy_reset");
-
- /* In the case of the phy reset being blocked, it's not an error, we
- * simply return success without performing the reset. */
- ret_val = e1000_check_phy_reset_block(hw);
- if (ret_val)
- return E1000_SUCCESS;
-
- switch (hw->phy_type) {
- case e1000_phy_igp:
- case e1000_phy_igp_2:
- case e1000_phy_igp_3:
- case e1000_phy_ife:
- ret_val = e1000_phy_hw_reset(hw);
- if (ret_val)
- return ret_val;
- break;
- default:
- ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= MII_CR_RESET;
- ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
- if (ret_val)
- return ret_val;
-
- udelay(1);
- break;
- }
-
- if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
- e1000_phy_init_script(hw);
-
- return E1000_SUCCESS;
-}
+ s32 ret_val;
+ u16 phy_data;
-/******************************************************************************
-* Work-around for 82566 power-down: on D3 entry-
-* 1) disable gigabit link
-* 2) write VR power-down enable
-* 3) read it back
-* if successful continue, else issue LCD reset and repeat
-*
-* hw - struct containing variables accessed by shared code
-******************************************************************************/
-void e1000_phy_powerdown_workaround(struct e1000_hw *hw)
-{
- s32 reg;
- u16 phy_data;
- s32 retry = 0;
+ DEBUGFUNC("e1000_phy_reset");
- DEBUGFUNC("e1000_phy_powerdown_workaround");
+ switch (hw->phy_type) {
+ case e1000_phy_igp:
+ ret_val = e1000_phy_hw_reset(hw);
+ if (ret_val)
+ return ret_val;
+ break;
+ default:
+ ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
- if (hw->phy_type != e1000_phy_igp_3)
- return;
+ phy_data |= MII_CR_RESET;
+ ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
- do {
- /* Disable link */
- reg = er32(PHY_CTRL);
- ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
- E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+ udelay(1);
+ break;
+ }
- /* Write VR power-down enable - bits 9:8 should be 10b */
- e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
- phy_data |= (1 << 9);
- phy_data &= ~(1 << 8);
- e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
+ if (hw->phy_type == e1000_phy_igp)
+ e1000_phy_init_script(hw);
- /* Read it back and test */
- e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
- if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
- break;
+ return E1000_SUCCESS;
+}
- /* Issue PHY reset and repeat at most one more time */
- reg = er32(CTRL);
- ew32(CTRL, reg | E1000_CTRL_PHY_RST);
- retry++;
- } while (retry);
+/**
+ * e1000_detect_gig_phy - check the phy type
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Probes the expected PHY address for known PHY IDs
+ */
+static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+ s32 phy_init_status, ret_val;
+ u16 phy_id_high, phy_id_low;
+ bool match = false;
- return;
+ DEBUGFUNC("e1000_detect_gig_phy");
-}
+ if (hw->phy_id != 0)
+ return E1000_SUCCESS;
-/******************************************************************************
-* Work-around for 82566 Kumeran PCS lock loss:
-* On link status change (i.e. PCI reset, speed change) and link is up and
-* speed is gigabit-
-* 0) if workaround is optionally disabled do nothing
-* 1) wait 1ms for Kumeran link to come up
-* 2) check Kumeran Diagnostic register PCS lock loss bit
-* 3) if not set the link is locked (all is good), otherwise...
-* 4) reset the PHY
-* 5) repeat up to 10 times
-* Note: this is only called for IGP3 copper when speed is 1gb.
-*
-* hw - struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
-{
- s32 ret_val;
- s32 reg;
- s32 cnt;
- u16 phy_data;
-
- if (hw->kmrn_lock_loss_workaround_disabled)
- return E1000_SUCCESS;
-
- /* Make sure link is up before proceeding. If not just return.
- * Attempting this while link is negotiating fouled up link
- * stability */
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-
- if (phy_data & MII_SR_LINK_STATUS) {
- for (cnt = 0; cnt < 10; cnt++) {
- /* read once to clear */
- ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
- if (ret_val)
- return ret_val;
- /* and again to get new status */
- ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
- if (ret_val)
- return ret_val;
-
- /* check for PCS lock */
- if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
- return E1000_SUCCESS;
-
- /* Issue PHY reset */
- e1000_phy_hw_reset(hw);
- mdelay(5);
- }
- /* Disable GigE link negotiation */
- reg = er32(PHY_CTRL);
- ew32(PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
- E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
-
- /* unable to acquire PCS lock */
- return E1000_ERR_PHY;
- }
-
- return E1000_SUCCESS;
-}
+ /* Read the PHY ID Registers to identify which PHY is onboard. */
+ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+ if (ret_val)
+ return ret_val;
-/******************************************************************************
-* Probes the expected PHY address for known PHY IDs
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
-static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
-{
- s32 phy_init_status, ret_val;
- u16 phy_id_high, phy_id_low;
- bool match = false;
-
- DEBUGFUNC("e1000_detect_gig_phy");
-
- if (hw->phy_id != 0)
- return E1000_SUCCESS;
-
- /* The 82571 firmware may still be configuring the PHY. In this
- * case, we cannot access the PHY until the configuration is done. So
- * we explicitly set the PHY values. */
- if (hw->mac_type == e1000_82571 ||
- hw->mac_type == e1000_82572) {
- hw->phy_id = IGP01E1000_I_PHY_ID;
- hw->phy_type = e1000_phy_igp_2;
- return E1000_SUCCESS;
- }
-
- /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
- * around that forces PHY page 0 to be set or the reads fail. The rest of
- * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
- * So for ESB-2 we need to have this set so our reads won't fail. If the
- * attached PHY is not a e1000_phy_gg82563, the routines below will figure
- * this out as well. */
- if (hw->mac_type == e1000_80003es2lan)
- hw->phy_type = e1000_phy_gg82563;
-
- /* Read the PHY ID Registers to identify which PHY is onboard. */
- ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
- if (ret_val)
- return ret_val;
-
- hw->phy_id = (u32)(phy_id_high << 16);
- udelay(20);
- ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
- if (ret_val)
- return ret_val;
-
- hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
- hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
-
- switch (hw->mac_type) {
- case e1000_82543:
- if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
- break;
- case e1000_82544:
- if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
- break;
- case e1000_82540:
- case e1000_82545:
- case e1000_82545_rev_3:
- case e1000_82546:
- case e1000_82546_rev_3:
- if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
- break;
- case e1000_82541:
- case e1000_82541_rev_2:
- case e1000_82547:
- case e1000_82547_rev_2:
- if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
- break;
- case e1000_82573:
- if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
- break;
- case e1000_80003es2lan:
- if (hw->phy_id == GG82563_E_PHY_ID) match = true;
- break;
- case e1000_ich8lan:
- if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
- if (hw->phy_id == IFE_E_PHY_ID) match = true;
- if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
- if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
- break;
- default:
- DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
- return -E1000_ERR_CONFIG;
- }
- phy_init_status = e1000_set_phy_type(hw);
-
- if ((match) && (phy_init_status == E1000_SUCCESS)) {
- DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
- return E1000_SUCCESS;
- }
- DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
- return -E1000_ERR_PHY;
+ hw->phy_id = (u32) (phy_id_high << 16);
+ udelay(20);
+ ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
+ hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
+
+ switch (hw->mac_type) {
+ case e1000_82543:
+ if (hw->phy_id == M88E1000_E_PHY_ID)
+ match = true;
+ break;
+ case e1000_82544:
+ if (hw->phy_id == M88E1000_I_PHY_ID)
+ match = true;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (hw->phy_id == M88E1011_I_PHY_ID)
+ match = true;
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (hw->phy_id == IGP01E1000_I_PHY_ID)
+ match = true;
+ break;
+ default:
+ DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+ return -E1000_ERR_CONFIG;
+ }
+ phy_init_status = e1000_set_phy_type(hw);
+
+ if ((match) && (phy_init_status == E1000_SUCCESS)) {
+ DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+ return -E1000_ERR_PHY;
}
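
Detection above reduces to stitching the two 16-bit ID registers into a single 32-bit identifier, with the low nibble split off as the silicon revision, and then comparing that identifier against the table of known parts. A tiny standalone illustration with arbitrary example register values:

/* phy_id_sketch.c - composing the 32-bit PHY ID and revision. */
#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK 0xFFFFFFF0u

int main(void)
{
	uint16_t phy_id_high = 0x02A8;   /* contents of PHY_ID1 (example) */
	uint16_t phy_id_low  = 0x0383;   /* contents of PHY_ID2 (example) */

	uint32_t phy_id  = ((uint32_t)phy_id_high << 16) |
			   (phy_id_low & PHY_REVISION_MASK);
	uint32_t phy_rev = phy_id_low & ~PHY_REVISION_MASK;

	printf("phy_id = 0x%08x, revision = %u\n",
	       (unsigned)phy_id, (unsigned)phy_rev);
	return 0;
}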
-/******************************************************************************
-* Resets the PHY's DSP
-*
-* hw - Struct containing variables accessed by shared code
-******************************************************************************/
+/**
+ * e1000_phy_reset_dsp - reset DSP
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Resets the PHY's DSP
+ */
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
- s32 ret_val;
- DEBUGFUNC("e1000_phy_reset_dsp");
-
- do {
- if (hw->phy_type != e1000_phy_gg82563) {
- ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
- if (ret_val) break;
- }
- ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
- if (ret_val) break;
- ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
- if (ret_val) break;
- ret_val = E1000_SUCCESS;
- } while (0);
-
- return ret_val;
+ s32 ret_val;
+ DEBUGFUNC("e1000_phy_reset_dsp");
+
+ do {
+ ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+ if (ret_val)
+ break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+ if (ret_val)
+ break;
+ ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+ if (ret_val)
+ break;
+ ret_val = E1000_SUCCESS;
+ } while (0);
+
+ return ret_val;
}
-/******************************************************************************
-* Get PHY information from various PHY registers for igp PHY only.
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
+/**
+ * e1000_phy_igp_get_info - get igp specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers for igp PHY only.
+ */
static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
struct e1000_phy_info *phy_info)
{
- s32 ret_val;
- u16 phy_data, min_length, max_length, average;
- e1000_rev_polarity polarity;
-
- DEBUGFUNC("e1000_phy_igp_get_info");
-
- /* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
- phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
-
- /* IGP01E1000 does not need to support it. */
- phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
-
- /* IGP01E1000 always correct polarity reversal */
- phy_info->polarity_correction = e1000_polarity_reversal_enabled;
-
- /* Check polarity status */
- ret_val = e1000_check_polarity(hw, &polarity);
- if (ret_val)
- return ret_val;
-
- phy_info->cable_polarity = polarity;
-
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & IGP01E1000_PSSR_MDIX) >>
- IGP01E1000_PSSR_MDIX_SHIFT);
-
- if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
- IGP01E1000_PSSR_SPEED_1000MBPS) {
- /* Local/Remote Receiver Information are only valid at 1000 Mbps */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
- e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
- e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-
- /* Get cable length */
- ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
- if (ret_val)
- return ret_val;
-
- /* Translate to old method */
- average = (max_length + min_length) / 2;
-
- if (average <= e1000_igp_cable_length_50)
- phy_info->cable_length = e1000_cable_length_50;
- else if (average <= e1000_igp_cable_length_80)
- phy_info->cable_length = e1000_cable_length_50_80;
- else if (average <= e1000_igp_cable_length_110)
- phy_info->cable_length = e1000_cable_length_80_110;
- else if (average <= e1000_igp_cable_length_140)
- phy_info->cable_length = e1000_cable_length_110_140;
- else
- phy_info->cable_length = e1000_cable_length_140;
- }
-
- return E1000_SUCCESS;
-}
+ s32 ret_val;
+ u16 phy_data, min_length, max_length, average;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_igp_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+	 * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+	/* IGP01E1000 does not need to support extended 10BT distance. */
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+	/* IGP01E1000 always corrects polarity reversal */
+ phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >>
+ IGP01E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+ /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ /* Get cable length */
+ ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+ if (ret_val)
+ return ret_val;
+
+ /* Translate to old method */
+ average = (max_length + min_length) / 2;
+
+ if (average <= e1000_igp_cable_length_50)
+ phy_info->cable_length = e1000_cable_length_50;
+ else if (average <= e1000_igp_cable_length_80)
+ phy_info->cable_length = e1000_cable_length_50_80;
+ else if (average <= e1000_igp_cable_length_110)
+ phy_info->cable_length = e1000_cable_length_80_110;
+ else if (average <= e1000_igp_cable_length_140)
+ phy_info->cable_length = e1000_cable_length_110_140;
+ else
+ phy_info->cable_length = e1000_cable_length_140;
+ }
-/******************************************************************************
-* Get PHY information from various PHY registers for ife PHY only.
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
-static s32 e1000_phy_ife_get_info(struct e1000_hw *hw,
- struct e1000_phy_info *phy_info)
-{
- s32 ret_val;
- u16 phy_data;
- e1000_rev_polarity polarity;
-
- DEBUGFUNC("e1000_phy_ife_get_info");
-
- phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
- phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
-
- ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
- phy_info->polarity_correction =
- ((phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
- IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT) ?
- e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
-
- if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
- ret_val = e1000_check_polarity(hw, &polarity);
- if (ret_val)
- return ret_val;
- } else {
- /* Polarity is forced. */
- polarity = ((phy_data & IFE_PSC_FORCE_POLARITY) >>
- IFE_PSC_FORCE_POLARITY_SHIFT) ?
- e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
- }
- phy_info->cable_polarity = polarity;
-
- ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->mdix_mode = (e1000_auto_x_mode)
- ((phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
- IFE_PMC_MDIX_MODE_SHIFT);
-
- return E1000_SUCCESS;
+ return E1000_SUCCESS;
}
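
The cable-length report above is a coarse translation: average the minimum and maximum per-pair estimates, then map the average into one of five buckets. The sketch below uses illustrative thresholds standing in for the e1000_igp_cable_length_* constants:

/* cable_length_sketch.c - min/max average mapped to coarse buckets. */
#include <stdio.h>

enum cable_bucket { LEN_50, LEN_50_80, LEN_80_110, LEN_110_140, LEN_140 };

static enum cable_bucket bucket_cable_length(unsigned min_m, unsigned max_m)
{
	unsigned average = (min_m + max_m) / 2;

	if (average <= 50)
		return LEN_50;
	if (average <= 80)
		return LEN_50_80;
	if (average <= 110)
		return LEN_80_110;
	if (average <= 140)
		return LEN_110_140;
	return LEN_140;
}

int main(void)
{
	printf("70..100 m estimate lands in bucket %d\n",
	       bucket_cable_length(70, 100));
	return 0;
}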
-/******************************************************************************
-* Get PHY information from various PHY registers fot m88 PHY only.
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
+/**
+ * e1000_phy_m88_get_info - get m88 specific registers
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers for m88 PHY only.
+ */
static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
struct e1000_phy_info *phy_info)
{
- s32 ret_val;
- u16 phy_data;
- e1000_rev_polarity polarity;
-
- DEBUGFUNC("e1000_phy_m88_get_info");
-
- /* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
- phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
-
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->extended_10bt_distance =
- ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
- M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
- e1000_10bt_ext_dist_enable_lower : e1000_10bt_ext_dist_enable_normal;
-
- phy_info->polarity_correction =
- ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
- M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
- e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
-
- /* Check polarity status */
- ret_val = e1000_check_polarity(hw, &polarity);
- if (ret_val)
- return ret_val;
- phy_info->cable_polarity = polarity;
-
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->mdix_mode = (e1000_auto_x_mode)((phy_data & M88E1000_PSSR_MDIX) >>
- M88E1000_PSSR_MDIX_SHIFT);
-
- if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- /* Cable Length Estimation and Local/Remote Receiver Information
- * are only valid at 1000 Mbps.
- */
- if (hw->phy_type != e1000_phy_gg82563) {
- phy_info->cable_length = (e1000_cable_length)((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT);
- } else {
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->cable_length = (e1000_cable_length)(phy_data & GG82563_DSPD_CABLE_LENGTH);
- }
-
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
- SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
- e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
- SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
- e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-
- }
-
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 phy_data;
+ e1000_rev_polarity polarity;
+
+ DEBUGFUNC("e1000_phy_m88_get_info");
+
+ /* The downshift status is checked only once, after link is established,
+	 * and it is stored in the hw->speed_downgraded parameter. */
+ phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->extended_10bt_distance =
+ ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+ M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ?
+ e1000_10bt_ext_dist_enable_lower :
+ e1000_10bt_ext_dist_enable_normal;
+
+ phy_info->polarity_correction =
+ ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+ M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ?
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled;
+
+ /* Check polarity status */
+ ret_val = e1000_check_polarity(hw, &polarity);
+ if (ret_val)
+ return ret_val;
+ phy_info->cable_polarity = polarity;
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->mdix_mode =
+ (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >>
+ M88E1000_PSSR_MDIX_SHIFT);
+
+ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+ /* Cable Length Estimation and Local/Remote Receiver Information
+ * are only valid at 1000 Mbps.
+ */
+ phy_info->cable_length =
+ (e1000_cable_length) ((phy_data &
+ M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+ SR_1000T_LOCAL_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+ SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
-* Get PHY information from various PHY registers
-*
-* hw - Struct containing variables accessed by shared code
-* phy_info - PHY information structure
-******************************************************************************/
+/**
+ * e1000_phy_get_info - request phy info
+ * @hw: Struct containing variables accessed by shared code
+ * @phy_info: PHY information structure
+ *
+ * Get PHY information from various PHY registers
+ */
s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_phy_get_info");
-
- phy_info->cable_length = e1000_cable_length_undefined;
- phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
- phy_info->cable_polarity = e1000_rev_polarity_undefined;
- phy_info->downshift = e1000_downshift_undefined;
- phy_info->polarity_correction = e1000_polarity_reversal_undefined;
- phy_info->mdix_mode = e1000_auto_x_mode_undefined;
- phy_info->local_rx = e1000_1000t_rx_status_undefined;
- phy_info->remote_rx = e1000_1000t_rx_status_undefined;
-
- if (hw->media_type != e1000_media_type_copper) {
- DEBUGOUT("PHY info is only valid for copper media\n");
- return -E1000_ERR_CONFIG;
- }
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
- if (ret_val)
- return ret_val;
-
- if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
- DEBUGOUT("PHY info is only valid if link is up\n");
- return -E1000_ERR_CONFIG;
- }
-
- if (hw->phy_type == e1000_phy_igp ||
- hw->phy_type == e1000_phy_igp_3 ||
- hw->phy_type == e1000_phy_igp_2)
- return e1000_phy_igp_get_info(hw, phy_info);
- else if (hw->phy_type == e1000_phy_ife)
- return e1000_phy_ife_get_info(hw, phy_info);
- else
- return e1000_phy_m88_get_info(hw, phy_info);
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_phy_get_info");
+
+ phy_info->cable_length = e1000_cable_length_undefined;
+ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+ phy_info->cable_polarity = e1000_rev_polarity_undefined;
+ phy_info->downshift = e1000_downshift_undefined;
+ phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+ phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+ phy_info->local_rx = e1000_1000t_rx_status_undefined;
+ phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+ if (hw->media_type != e1000_media_type_copper) {
+ DEBUGOUT("PHY info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+ DEBUGOUT("PHY info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (hw->phy_type == e1000_phy_igp)
+ return e1000_phy_igp_get_info(hw, phy_info);
+ else
+ return e1000_phy_m88_get_info(hw, phy_info);
}
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_validate_mdi_settings");
-
- if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
- DEBUGOUT("Invalid MDI setting detected\n");
- hw->mdix = 1;
- return -E1000_ERR_CONFIG;
- }
- return E1000_SUCCESS;
-}
+	DEBUGFUNC("e1000_validate_mdi_setting");
+
+	if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+ DEBUGOUT("Invalid MDI setting detected\n");
+ hw->mdix = 1;
+ return -E1000_ERR_CONFIG;
+ }
+ return E1000_SUCCESS;
+}
-/******************************************************************************
- * Sets up eeprom variables in the hw struct. Must be called after mac_type
- * is configured. Additionally, if this is ICH8, the flash controller GbE
- * registers must be mapped, or this will crash.
+/**
+ * e1000_init_eeprom_params - initialize sw eeprom vars
+ * @hw: Struct containing variables accessed by shared code
*
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ * Sets up eeprom variables in the hw struct. Must be called after mac_type
+ * is configured.
+ */
s32 e1000_init_eeprom_params(struct e1000_hw *hw)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 eecd = er32(EECD);
- s32 ret_val = E1000_SUCCESS;
- u16 eeprom_size;
-
- DEBUGFUNC("e1000_init_eeprom_params");
-
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- case e1000_82543:
- case e1000_82544:
- eeprom->type = e1000_eeprom_microwire;
- eeprom->word_size = 64;
- eeprom->opcode_bits = 3;
- eeprom->address_bits = 6;
- eeprom->delay_usec = 50;
- eeprom->use_eerd = false;
- eeprom->use_eewr = false;
- break;
- case e1000_82540:
- case e1000_82545:
- case e1000_82545_rev_3:
- case e1000_82546:
- case e1000_82546_rev_3:
- eeprom->type = e1000_eeprom_microwire;
- eeprom->opcode_bits = 3;
- eeprom->delay_usec = 50;
- if (eecd & E1000_EECD_SIZE) {
- eeprom->word_size = 256;
- eeprom->address_bits = 8;
- } else {
- eeprom->word_size = 64;
- eeprom->address_bits = 6;
- }
- eeprom->use_eerd = false;
- eeprom->use_eewr = false;
- break;
- case e1000_82541:
- case e1000_82541_rev_2:
- case e1000_82547:
- case e1000_82547_rev_2:
- if (eecd & E1000_EECD_TYPE) {
- eeprom->type = e1000_eeprom_spi;
- eeprom->opcode_bits = 8;
- eeprom->delay_usec = 1;
- if (eecd & E1000_EECD_ADDR_BITS) {
- eeprom->page_size = 32;
- eeprom->address_bits = 16;
- } else {
- eeprom->page_size = 8;
- eeprom->address_bits = 8;
- }
- } else {
- eeprom->type = e1000_eeprom_microwire;
- eeprom->opcode_bits = 3;
- eeprom->delay_usec = 50;
- if (eecd & E1000_EECD_ADDR_BITS) {
- eeprom->word_size = 256;
- eeprom->address_bits = 8;
- } else {
- eeprom->word_size = 64;
- eeprom->address_bits = 6;
- }
- }
- eeprom->use_eerd = false;
- eeprom->use_eewr = false;
- break;
- case e1000_82571:
- case e1000_82572:
- eeprom->type = e1000_eeprom_spi;
- eeprom->opcode_bits = 8;
- eeprom->delay_usec = 1;
- if (eecd & E1000_EECD_ADDR_BITS) {
- eeprom->page_size = 32;
- eeprom->address_bits = 16;
- } else {
- eeprom->page_size = 8;
- eeprom->address_bits = 8;
- }
- eeprom->use_eerd = false;
- eeprom->use_eewr = false;
- break;
- case e1000_82573:
- eeprom->type = e1000_eeprom_spi;
- eeprom->opcode_bits = 8;
- eeprom->delay_usec = 1;
- if (eecd & E1000_EECD_ADDR_BITS) {
- eeprom->page_size = 32;
- eeprom->address_bits = 16;
- } else {
- eeprom->page_size = 8;
- eeprom->address_bits = 8;
- }
- eeprom->use_eerd = true;
- eeprom->use_eewr = true;
- if (!e1000_is_onboard_nvm_eeprom(hw)) {
- eeprom->type = e1000_eeprom_flash;
- eeprom->word_size = 2048;
-
- /* Ensure that the Autonomous FLASH update bit is cleared due to
- * Flash update issue on parts which use a FLASH for NVM. */
- eecd &= ~E1000_EECD_AUPDEN;
- ew32(EECD, eecd);
- }
- break;
- case e1000_80003es2lan:
- eeprom->type = e1000_eeprom_spi;
- eeprom->opcode_bits = 8;
- eeprom->delay_usec = 1;
- if (eecd & E1000_EECD_ADDR_BITS) {
- eeprom->page_size = 32;
- eeprom->address_bits = 16;
- } else {
- eeprom->page_size = 8;
- eeprom->address_bits = 8;
- }
- eeprom->use_eerd = true;
- eeprom->use_eewr = false;
- break;
- case e1000_ich8lan:
- {
- s32 i = 0;
- u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
-
- eeprom->type = e1000_eeprom_ich8;
- eeprom->use_eerd = false;
- eeprom->use_eewr = false;
- eeprom->word_size = E1000_SHADOW_RAM_WORDS;
-
- /* Zero the shadow RAM structure. But don't load it from NVM
- * so as to save time for driver init */
- if (hw->eeprom_shadow_ram != NULL) {
- for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
- hw->eeprom_shadow_ram[i].modified = false;
- hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
- }
- }
-
- hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
- ICH_FLASH_SECTOR_SIZE;
-
- hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
- hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
-
- hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
-
- hw->flash_bank_size /= 2 * sizeof(u16);
-
- break;
- }
- default:
- break;
- }
-
- if (eeprom->type == e1000_eeprom_spi) {
- /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
- * 32KB (incremented by powers of 2).
- */
- if (hw->mac_type <= e1000_82547_rev_2) {
- /* Set to default value for initial eeprom read. */
- eeprom->word_size = 64;
- ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
- if (ret_val)
- return ret_val;
- eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
- /* 256B eeprom size was not supported in earlier hardware, so we
- * bump eeprom_size up one to ensure that "1" (which maps to 256B)
- * is never the result used in the shifting logic below. */
- if (eeprom_size)
- eeprom_size++;
- } else {
- eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
- }
-
- eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
- }
- return ret_val;
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd = er32(EECD);
+ s32 ret_val = E1000_SUCCESS;
+ u16 eeprom_size;
+
+ DEBUGFUNC("e1000_init_eeprom_params");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->word_size = 64;
+ eeprom->opcode_bits = 3;
+ eeprom->address_bits = 6;
+ eeprom->delay_usec = 50;
+ break;
+ case e1000_82540:
+ case e1000_82545:
+ case e1000_82545_rev_3:
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_SIZE) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ break;
+ case e1000_82541:
+ case e1000_82541_rev_2:
+ case e1000_82547:
+ case e1000_82547_rev_2:
+ if (eecd & E1000_EECD_TYPE) {
+ eeprom->type = e1000_eeprom_spi;
+ eeprom->opcode_bits = 8;
+ eeprom->delay_usec = 1;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->page_size = 32;
+ eeprom->address_bits = 16;
+ } else {
+ eeprom->page_size = 8;
+ eeprom->address_bits = 8;
+ }
+ } else {
+ eeprom->type = e1000_eeprom_microwire;
+ eeprom->opcode_bits = 3;
+ eeprom->delay_usec = 50;
+ if (eecd & E1000_EECD_ADDR_BITS) {
+ eeprom->word_size = 256;
+ eeprom->address_bits = 8;
+ } else {
+ eeprom->word_size = 64;
+ eeprom->address_bits = 6;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (eeprom->type == e1000_eeprom_spi) {
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+ * 32KB (incremented by powers of 2).
+ */
+ /* Set to default value for initial eeprom read. */
+ eeprom->word_size = 64;
+ ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+ if (ret_val)
+ return ret_val;
+ eeprom_size =
+ (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+ /* 256B eeprom size was not supported in earlier hardware, so we
+ * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+ * is never the result used in the shifting logic below. */
+ if (eeprom_size)
+ eeprom_size++;
+
+ eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+ }
+ return ret_val;
}
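
For SPI parts the word count above comes from a small size field in the EEPROM_CFG word: the field is first bumped past the encoding that would mean an unsupported 256-byte part, then used as a power-of-two exponent. A sketch of that decode, assuming EEPROM_WORD_SIZE_SHIFT is 6 (so a field of 0 yields 64 sixteen-bit words, i.e. 128 bytes):

/* eeprom_size_sketch.c - deriving the SPI EEPROM word count. */
#include <stdio.h>

#define EEPROM_WORD_SIZE_SHIFT 6   /* assumed value for this sketch */

static unsigned spi_word_size(unsigned size_field)
{
	/* 256B parts were never supported here, so skip the encoding that
	 * would map to them by bumping every non-zero field value by one. */
	if (size_field)
		size_field++;

	return 1u << (size_field + EEPROM_WORD_SIZE_SHIFT);
}

int main(void)
{
	unsigned f;

	for (f = 0; f <= 4; f++)
		printf("size field %u -> %u words (%u bytes)\n",
		       f, spi_word_size(f), spi_word_size(f) * 2);
	return 0;
}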
-/******************************************************************************
- * Raises the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd - EECD's current value
- *****************************************************************************/
+/**
+ * e1000_raise_ee_clk - Raises the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Raise the clock input to the EEPROM (by setting the SK bit), and then
- * wait <delay> microseconds.
- */
- *eecd = *eecd | E1000_EECD_SK;
- ew32(EECD, *eecd);
- E1000_WRITE_FLUSH();
- udelay(hw->eeprom.delay_usec);
+ /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+ * wait <delay> microseconds.
+ */
+ *eecd = *eecd | E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
}
-/******************************************************************************
- * Lowers the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd - EECD's current value
- *****************************************************************************/
+/**
+ * e1000_lower_ee_clk - Lowers the EEPROM's clock input.
+ * @hw: Struct containing variables accessed by shared code
+ * @eecd: EECD's current value
+ */
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd = *eecd & ~E1000_EECD_SK;
- ew32(EECD, *eecd);
- E1000_WRITE_FLUSH();
- udelay(hw->eeprom.delay_usec);
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+	 * wait <delay> microseconds.
+ */
+ *eecd = *eecd & ~E1000_EECD_SK;
+ ew32(EECD, *eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
}
-/******************************************************************************
- * Shift data bits out to the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * data - data to send to the EEPROM
- * count - number of bits to shift out
- *****************************************************************************/
+/**
+ * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ */
static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 eecd;
- u32 mask;
-
- /* We need to shift "count" bits out to the EEPROM. So, value in the
- * "data" parameter will be shifted out to the EEPROM one bit at a time.
- * In order to do this, "data" must be broken down into bits.
- */
- mask = 0x01 << (count - 1);
- eecd = er32(EECD);
- if (eeprom->type == e1000_eeprom_microwire) {
- eecd &= ~E1000_EECD_DO;
- } else if (eeprom->type == e1000_eeprom_spi) {
- eecd |= E1000_EECD_DO;
- }
- do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
- */
- eecd &= ~E1000_EECD_DI;
-
- if (data & mask)
- eecd |= E1000_EECD_DI;
-
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
-
- udelay(eeprom->delay_usec);
-
- e1000_raise_ee_clk(hw, &eecd);
- e1000_lower_ee_clk(hw, &eecd);
-
- mask = mask >> 1;
-
- } while (mask);
-
- /* We leave the "DI" bit set to "0" when we leave this routine. */
- eecd &= ~E1000_EECD_DI;
- ew32(EECD, eecd);
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u32 mask;
+
+ /* We need to shift "count" bits out to the EEPROM. So, value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
+ */
+ mask = 0x01 << (count - 1);
+ eecd = er32(EECD);
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~E1000_EECD_DO;
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_DO;
+ }
+ do {
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+ * and then raising and then lowering the clock (the SK bit controls
+ * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
+ * by setting "DI" to "0" and then raising and then lowering the clock.
+ */
+ eecd &= ~E1000_EECD_DI;
+
+ if (data & mask)
+ eecd |= E1000_EECD_DI;
+
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+
+ udelay(eeprom->delay_usec);
+
+ e1000_raise_ee_clk(hw, &eecd);
+ e1000_lower_ee_clk(hw, &eecd);
+
+ mask = mask >> 1;
+
+ } while (mask);
+
+ /* We leave the "DI" bit set to "0" when we leave this routine. */
+ eecd &= ~E1000_EECD_DI;
+ ew32(EECD, eecd);
}
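
A minimal user-space sketch of the same MSB-first shift-out pattern, with putchar standing in for the DI/SK register writes; the function name and sample values are illustrative only.

#include <stdio.h>
#include <stdint.h>

static void shift_out_bits(uint16_t data, uint16_t count)
{
	uint16_t mask = 0x01 << (count - 1);

	do {
		/* DI would follow this bit; SK would then be raised and lowered */
		putchar((data & mask) ? '1' : '0');
		mask >>= 1;
	} while (mask);
	putchar('\n');
}

int main(void)
{
	shift_out_bits(0x6, 3);		/* prints 110 */
	shift_out_bits(0x1234, 16);	/* prints 0001001000110100 */
	return 0;
}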
-/******************************************************************************
- * Shift data bits in from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ * @count: number of bits to shift in
+ */
static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
{
- u32 eecd;
- u32 i;
- u16 data;
+ u32 eecd;
+ u32 i;
+ u16 data;
- /* In order to read a register from the EEPROM, we need to shift 'count'
- * bits in from the EEPROM. Bits are "shifted in" by raising the clock
- * input to the EEPROM (setting the SK bit), and then reading the value of
- * the "DO" bit. During this "shifting in" process the "DI" bit should
- * always be clear.
- */
+ /* In order to read a register from the EEPROM, we need to shift 'count'
+ * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+ * input to the EEPROM (setting the SK bit), and then reading the value of
+ * the "DO" bit. During this "shifting in" process the "DI" bit should
+ * always be clear.
+ */
- eecd = er32(EECD);
+ eecd = er32(EECD);
- eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
- data = 0;
+ eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+ data = 0;
- for (i = 0; i < count; i++) {
- data = data << 1;
- e1000_raise_ee_clk(hw, &eecd);
+ for (i = 0; i < count; i++) {
+ data = data << 1;
+ e1000_raise_ee_clk(hw, &eecd);
- eecd = er32(EECD);
+ eecd = er32(EECD);
- eecd &= ~(E1000_EECD_DI);
- if (eecd & E1000_EECD_DO)
- data |= 1;
+ eecd &= ~(E1000_EECD_DI);
+ if (eecd & E1000_EECD_DO)
+ data |= 1;
- e1000_lower_ee_clk(hw, &eecd);
- }
+ e1000_lower_ee_clk(hw, &eecd);
+ }
- return data;
+ return data;
}
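
The read direction uses the mirror-image accumulation: shift the result left, then sample DO after raising the clock. A user-space sketch with a fabricated DO sequence (the bit pattern below is made up):

#include <stdio.h>
#include <stdint.h>

static int sample_do_line(int clock_index)
{
	/* hypothetical stand-in for reading EECD.DO after raising SK */
	static const int pattern[16] = { 1,0,1,1, 0,0,1,0, 1,1,1,1, 0,0,0,1 };
	return pattern[clock_index];
}

int main(void)
{
	uint16_t data = 0;
	int i;

	for (i = 0; i < 16; i++) {
		data <<= 1;		/* make room for the next bit */
		if (sample_do_line(i))
			data |= 1;	/* latch the sampled DO value */
	}
	printf("word read: 0x%04X\n", data);	/* 0xB2F1 for this pattern */
	return 0;
}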
-/******************************************************************************
- * Prepares EEPROM for access
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_acquire_eeprom - Prepares EEPROM for access
+ * @hw: Struct containing variables accessed by shared code
*
* Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
* function should be called before issuing a command to the EEPROM.
- *****************************************************************************/
+ */
static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 eecd, i=0;
-
- DEBUGFUNC("e1000_acquire_eeprom");
-
- if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
- return -E1000_ERR_SWFW_SYNC;
- eecd = er32(EECD);
-
- if (hw->mac_type != e1000_82573) {
- /* Request EEPROM Access */
- if (hw->mac_type > e1000_82544) {
- eecd |= E1000_EECD_REQ;
- ew32(EECD, eecd);
- eecd = er32(EECD);
- while ((!(eecd & E1000_EECD_GNT)) &&
- (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
- i++;
- udelay(5);
- eecd = er32(EECD);
- }
- if (!(eecd & E1000_EECD_GNT)) {
- eecd &= ~E1000_EECD_REQ;
- ew32(EECD, eecd);
- DEBUGOUT("Could not acquire EEPROM grant\n");
- e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
- return -E1000_ERR_EEPROM;
- }
- }
- }
-
- /* Setup EEPROM for Read/Write */
-
- if (eeprom->type == e1000_eeprom_microwire) {
- /* Clear SK and DI */
- eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
- ew32(EECD, eecd);
-
- /* Set CS */
- eecd |= E1000_EECD_CS;
- ew32(EECD, eecd);
- } else if (eeprom->type == e1000_eeprom_spi) {
- /* Clear SK and CS */
- eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- ew32(EECD, eecd);
- udelay(1);
- }
-
- return E1000_SUCCESS;
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd, i = 0;
+
+ DEBUGFUNC("e1000_acquire_eeprom");
+
+ eecd = er32(EECD);
+
+ /* Request EEPROM Access */
+ if (hw->mac_type > e1000_82544) {
+ eecd |= E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ eecd = er32(EECD);
+ while ((!(eecd & E1000_EECD_GNT)) &&
+ (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+ i++;
+ udelay(5);
+ eecd = er32(EECD);
+ }
+ if (!(eecd & E1000_EECD_GNT)) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ DEBUGOUT("Could not acquire EEPROM grant\n");
+ return -E1000_ERR_EEPROM;
+ }
+ }
+
+ /* Setup EEPROM for Read/Write */
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ /* Clear SK and DI */
+ eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+ ew32(EECD, eecd);
+
+ /* Set CS */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Clear SK and CS */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ udelay(1);
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Returns EEPROM to a "standby" state
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_standby_eeprom - Returns EEPROM to a "standby" state
+ * @hw: Struct containing variables accessed by shared code
+ */
static void e1000_standby_eeprom(struct e1000_hw *hw)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 eecd;
-
- eecd = er32(EECD);
-
- if (eeprom->type == e1000_eeprom_microwire) {
- eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(eeprom->delay_usec);
-
- /* Clock high */
- eecd |= E1000_EECD_SK;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(eeprom->delay_usec);
-
- /* Select EEPROM */
- eecd |= E1000_EECD_CS;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(eeprom->delay_usec);
-
- /* Clock low */
- eecd &= ~E1000_EECD_SK;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(eeprom->delay_usec);
- } else if (eeprom->type == e1000_eeprom_spi) {
- /* Toggle CS to flush commands */
- eecd |= E1000_EECD_CS;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(eeprom->delay_usec);
- eecd &= ~E1000_EECD_CS;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(eeprom->delay_usec);
- }
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+
+ eecd = er32(EECD);
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock high */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Select EEPROM */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+
+ /* Clock low */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ } else if (eeprom->type == e1000_eeprom_spi) {
+ /* Toggle CS to flush commands */
+ eecd |= E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ eecd &= ~E1000_EECD_CS;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(eeprom->delay_usec);
+ }
}
-/******************************************************************************
- * Terminates a command by inverting the EEPROM's chip select pin
+/**
+ * e1000_release_eeprom - drop chip select
+ * @hw: Struct containing variables accessed by shared code
*
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ * Terminates a command by inverting the EEPROM's chip select pin
+ */
static void e1000_release_eeprom(struct e1000_hw *hw)
{
- u32 eecd;
-
- DEBUGFUNC("e1000_release_eeprom");
+ u32 eecd;
- eecd = er32(EECD);
+ DEBUGFUNC("e1000_release_eeprom");
- if (hw->eeprom.type == e1000_eeprom_spi) {
- eecd |= E1000_EECD_CS; /* Pull CS high */
- eecd &= ~E1000_EECD_SK; /* Lower SCK */
+ eecd = er32(EECD);
- ew32(EECD, eecd);
+ if (hw->eeprom.type == e1000_eeprom_spi) {
+ eecd |= E1000_EECD_CS; /* Pull CS high */
+ eecd &= ~E1000_EECD_SK; /* Lower SCK */
- udelay(hw->eeprom.delay_usec);
- } else if (hw->eeprom.type == e1000_eeprom_microwire) {
- /* cleanup eeprom */
+ ew32(EECD, eecd);
- /* CS on Microwire is active-high */
- eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+ udelay(hw->eeprom.delay_usec);
+ } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+ /* cleanup eeprom */
- ew32(EECD, eecd);
+ /* CS on Microwire is active-high */
+ eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
- /* Rising edge of clock */
- eecd |= E1000_EECD_SK;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(hw->eeprom.delay_usec);
+ ew32(EECD, eecd);
- /* Falling edge of clock */
- eecd &= ~E1000_EECD_SK;
- ew32(EECD, eecd);
- E1000_WRITE_FLUSH();
- udelay(hw->eeprom.delay_usec);
- }
+ /* Rising edge of clock */
+ eecd |= E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
- /* Stop requesting EEPROM access */
- if (hw->mac_type > e1000_82544) {
- eecd &= ~E1000_EECD_REQ;
- ew32(EECD, eecd);
- }
+ /* Falling edge of clock */
+ eecd &= ~E1000_EECD_SK;
+ ew32(EECD, eecd);
+ E1000_WRITE_FLUSH();
+ udelay(hw->eeprom.delay_usec);
+ }
- e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+ /* Stop requesting EEPROM access */
+ if (hw->mac_type > e1000_82544) {
+ eecd &= ~E1000_EECD_REQ;
+ ew32(EECD, eecd);
+ }
}
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_spi_eeprom_ready - Waits for the SPI EEPROM to become ready for the next command.
+ * @hw: Struct containing variables accessed by shared code
+ */
static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
{
- u16 retry_count = 0;
- u8 spi_stat_reg;
-
- DEBUGFUNC("e1000_spi_eeprom_ready");
-
- /* Read "Status Register" repeatedly until the LSB is cleared. The
- * EEPROM will signal that the command has been completed by clearing
- * bit 0 of the internal status register. If it's not cleared within
- * 5 milliseconds, then error out.
- */
- retry_count = 0;
- do {
- e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
- hw->eeprom.opcode_bits);
- spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
- if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
- break;
-
- udelay(5);
- retry_count += 5;
-
- e1000_standby_eeprom(hw);
- } while (retry_count < EEPROM_MAX_RETRY_SPI);
-
- /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
- * only 0-5mSec on 5V devices)
- */
- if (retry_count >= EEPROM_MAX_RETRY_SPI) {
- DEBUGOUT("SPI EEPROM Status error\n");
- return -E1000_ERR_EEPROM;
- }
-
- return E1000_SUCCESS;
-}
-
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- s32 ret;
- spin_lock(&e1000_eeprom_lock);
- ret = e1000_do_read_eeprom(hw, offset, words, data);
- spin_unlock(&e1000_eeprom_lock);
- return ret;
-}
-
-static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
-{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 i = 0;
-
- DEBUGFUNC("e1000_read_eeprom");
-
- /* If eeprom is not yet detected, do so now */
- if (eeprom->word_size == 0)
- e1000_init_eeprom_params(hw);
-
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
- */
- if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
- (words == 0)) {
- DEBUGOUT2("\"words\" parameter out of bounds. Words = %d, size = %d\n", offset, eeprom->word_size);
- return -E1000_ERR_EEPROM;
- }
-
- /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
- * directly. In this case, we need to acquire the EEPROM so that
- * FW or other port software does not interrupt.
- */
- if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
- /* Prepare the EEPROM for bit-bang reading */
- if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
- return -E1000_ERR_EEPROM;
- }
-
- /* Eerd register EEPROM access requires no eeprom aquire/release */
- if (eeprom->use_eerd)
- return e1000_read_eeprom_eerd(hw, offset, words, data);
-
- /* ICH EEPROM access is done via the ICH flash controller */
- if (eeprom->type == e1000_eeprom_ich8)
- return e1000_read_eeprom_ich8(hw, offset, words, data);
-
- /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
- * acquired the EEPROM at this point, so any returns should relase it */
- if (eeprom->type == e1000_eeprom_spi) {
- u16 word_in;
- u8 read_opcode = EEPROM_READ_OPCODE_SPI;
-
- if (e1000_spi_eeprom_ready(hw)) {
- e1000_release_eeprom(hw);
- return -E1000_ERR_EEPROM;
- }
-
- e1000_standby_eeprom(hw);
-
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
- if ((eeprom->address_bits == 8) && (offset >= 128))
- read_opcode |= EEPROM_A8_OPCODE_SPI;
-
- /* Send the READ command (opcode + addr) */
- e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
- e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
-
- /* Read the data. The address of the eeprom internally increments with
- * each byte (spi) being read, saving on the overhead of eeprom setup
- * and tear-down. The address counter will roll over if reading beyond
- * the size of the eeprom, thus allowing the entire memory to be read
- * starting from any offset. */
- for (i = 0; i < words; i++) {
- word_in = e1000_shift_in_ee_bits(hw, 16);
- data[i] = (word_in >> 8) | (word_in << 8);
- }
- } else if (eeprom->type == e1000_eeprom_microwire) {
- for (i = 0; i < words; i++) {
- /* Send the READ command (opcode + addr) */
- e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
- eeprom->opcode_bits);
- e1000_shift_out_ee_bits(hw, (u16)(offset + i),
- eeprom->address_bits);
-
- /* Read the data. For microwire, each word requires the overhead
- * of eeprom setup and tear-down. */
- data[i] = e1000_shift_in_ee_bits(hw, 16);
- e1000_standby_eeprom(hw);
- }
- }
-
- /* End this read operation */
- e1000_release_eeprom(hw);
-
- return E1000_SUCCESS;
-}
+ u16 retry_count = 0;
+ u8 spi_stat_reg;
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM using the EERD register.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- u32 i, eerd = 0;
- s32 error = 0;
+ DEBUGFUNC("e1000_spi_eeprom_ready");
- for (i = 0; i < words; i++) {
- eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
- E1000_EEPROM_RW_REG_START;
+ /* Read "Status Register" repeatedly until the LSB is cleared. The
+ * EEPROM will signal that the command has been completed by clearing
+ * bit 0 of the internal status register. If it's not cleared within
+ * 5 milliseconds, then error out.
+ */
+ retry_count = 0;
+ do {
+ e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+ hw->eeprom.opcode_bits);
+ spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
+ if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+ break;
- ew32(EERD, eerd);
- error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+ udelay(5);
+ retry_count += 5;
- if (error) {
- break;
- }
- data[i] = (er32(EERD) >> E1000_EEPROM_RW_REG_DATA);
+ e1000_standby_eeprom(hw);
+ } while (retry_count < EEPROM_MAX_RETRY_SPI);
- }
+ /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+ * only 0-5mSec on 5V devices)
+ */
+ if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+ DEBUGOUT("SPI EEPROM Status error\n");
+ return -E1000_ERR_EEPROM;
+ }
- return error;
+ return E1000_SUCCESS;
}
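
The bounded polling above follows a common pattern: retry in small fixed delays until a time budget is exhausted. A standalone sketch of just that control flow; the status source and the 5000 us budget below are stand-ins, not the driver's constants.

#include <stdio.h>

#define MAX_RETRY_US	5000
#define STATUS_RDY	0x01	/* LSB set means "write in progress" */

static int read_status(int elapsed_us)
{
	return (elapsed_us < 40) ? STATUS_RDY : 0;	/* busy for ~40 us */
}

int main(void)
{
	int retry_us = 0;

	while (retry_us < MAX_RETRY_US) {
		if (!(read_status(retry_us) & STATUS_RDY)) {
			printf("ready after %d us\n", retry_us);
			return 0;
		}
		retry_us += 5;	/* the driver sleeps 5 us between polls */
	}
	printf("timed out\n");
	return 1;
}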
-/******************************************************************************
- * Writes a 16 bit word from the EEPROM using the EEWR register.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+/**
+ * e1000_read_eeprom - Reads a 16 bit word from the EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ * @words: number of words to read
+ */
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
- u32 register_value = 0;
- u32 i = 0;
- s32 error = 0;
-
- if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
- return -E1000_ERR_SWFW_SYNC;
-
- for (i = 0; i < words; i++) {
- register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
- ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
- E1000_EEPROM_RW_REG_START;
-
- error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
- if (error) {
- break;
- }
-
- ew32(EEWR, register_value);
-
- error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-
- if (error) {
- break;
- }
- }
-
- e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
- return error;
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_read_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
}
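
The wrapper/worker split above (take the lock in the thin exported function, do the real work in an unlocked helper) can be sketched in user space with a pthread mutex standing in for the spinlock; everything below is illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t eeprom_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_read_eeprom(unsigned offset, unsigned words, unsigned short *data)
{
	unsigned i;

	for (i = 0; i < words; i++)
		data[i] = (unsigned short)(offset + i);	/* fake contents */
	return 0;
}

static int read_eeprom(unsigned offset, unsigned words, unsigned short *data)
{
	int ret;

	pthread_mutex_lock(&eeprom_lock);	/* serialize all callers */
	ret = do_read_eeprom(offset, words, data);
	pthread_mutex_unlock(&eeprom_lock);
	return ret;
}

int main(void)
{
	unsigned short buf[4];

	read_eeprom(0x10, 4, buf);
	printf("first word: 0x%04X\n", buf[0]);
	return 0;
}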
-/******************************************************************************
- * Polls the status bit (bit 1) of the EERD to determine when the read is done.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
- u32 attempts = 100000;
- u32 i, reg = 0;
- s32 done = E1000_ERR_EEPROM;
-
- for (i = 0; i < attempts; i++) {
- if (eerd == E1000_EEPROM_POLL_READ)
- reg = er32(EERD);
- else
- reg = er32(EEWR);
-
- if (reg & E1000_EEPROM_RW_REG_DONE) {
- done = E1000_SUCCESS;
- break;
- }
- udelay(5);
- }
-
- return done;
-}
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 i = 0;
-/***************************************************************************
-* Description: Determines if the onboard NVM is FLASH or EEPROM.
-*
-* hw - Struct containing variables accessed by shared code
-****************************************************************************/
-static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
-{
- u32 eecd = 0;
+ DEBUGFUNC("e1000_read_eeprom");
- DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ DEBUGOUT2
+ ("\"words\" parameter out of bounds. Words = %d, size = %d\n",
+ offset, eeprom->word_size);
+ return -E1000_ERR_EEPROM;
+ }
- if (hw->mac_type == e1000_ich8lan)
- return false;
+ /* EEPROM's that don't use EERD to read require us to bit-bang the SPI
+ * directly. In this case, we need to acquire the EEPROM so that
+ * FW or other port software does not interrupt.
+ */
+ /* Prepare the EEPROM for bit-bang reading */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
+ * acquired the EEPROM at this point, so any returns should release it */
+ if (eeprom->type == e1000_eeprom_spi) {
+ u16 word_in;
+ u8 read_opcode = EEPROM_READ_OPCODE_SPI;
+
+ if (e1000_spi_eeprom_ready(hw)) {
+ e1000_release_eeprom(hw);
+ return -E1000_ERR_EEPROM;
+ }
- if (hw->mac_type == e1000_82573) {
- eecd = er32(EECD);
+ e1000_standby_eeprom(hw);
+
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
+ eeprom->address_bits);
+
+ /* Read the data. The address of the eeprom internally increments with
+ * each byte (spi) being read, saving on the overhead of eeprom setup
+ * and tear-down. The address counter will roll over if reading beyond
+ * the size of the eeprom, thus allowing the entire memory to be read
+ * starting from any offset. */
+ for (i = 0; i < words; i++) {
+ word_in = e1000_shift_in_ee_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
+ } else if (eeprom->type == e1000_eeprom_microwire) {
+ for (i = 0; i < words; i++) {
+ /* Send the READ command (opcode + addr) */
+ e1000_shift_out_ee_bits(hw,
+ EEPROM_READ_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+ e1000_shift_out_ee_bits(hw, (u16) (offset + i),
+ eeprom->address_bits);
+
+ /* Read the data. For microwire, each word requires the overhead
+ * of eeprom setup and tear-down. */
+ data[i] = e1000_shift_in_ee_bits(hw, 16);
+ e1000_standby_eeprom(hw);
+ }
+ }
- /* Isolate bits 15 & 16 */
- eecd = ((eecd >> 15) & 0x03);
+ /* End this read operation */
+ e1000_release_eeprom(hw);
- /* If both bits are set, device is Flash type */
- if (eecd == 0x03) {
- return false;
- }
- }
- return true;
+ return E1000_SUCCESS;
}
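
Two details of the SPI branch above are easy to miss: the word offset is doubled into a byte address before it is shifted out, and each incoming 16-bit word arrives with its bytes swapped. A small sketch with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t offset = 0x0040;	/* word offset in the EEPROM */
	uint16_t word_in = 0x3412;	/* as clocked in, bytes swapped */
	uint16_t byte_addr = (uint16_t)(offset * 2);
	uint16_t data = (uint16_t)((word_in >> 8) | (word_in << 8));

	printf("byte address sent: 0x%04X\n", (unsigned)byte_addr);	/* 0x0080 */
	printf("stored word:       0x%04X\n", (unsigned)data);		/* 0x1234 */
	return 0;
}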
-/******************************************************************************
- * Verifies that the EEPROM has a valid checksum
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum
+ * @hw: Struct containing variables accessed by shared code
*
* Reads the first 64 16 bit words of the EEPROM and sums the values read.
 * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
* valid.
- *****************************************************************************/
+ */
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
{
- u16 checksum = 0;
- u16 i, eeprom_data;
-
- DEBUGFUNC("e1000_validate_eeprom_checksum");
-
- if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
- /* Check bit 4 of word 10h. If it is 0, firmware is done updating
- * 10h-12h. Checksum may need to be fixed. */
- e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
- if ((eeprom_data & 0x10) == 0) {
- /* Read 0x23 and check bit 15. This bit is a 1 when the checksum
- * has already been fixed. If the checksum is still wrong and this
- * bit is a 1, we need to return bad checksum. Otherwise, we need
- * to set this bit to a 1 and update the checksum. */
- e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
- if ((eeprom_data & 0x8000) == 0) {
- eeprom_data |= 0x8000;
- e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
- e1000_update_eeprom_checksum(hw);
- }
- }
- }
-
- if (hw->mac_type == e1000_ich8lan) {
- /* Drivers must allocate the shadow ram structure for the
- * EEPROM checksum to be updated. Otherwise, this bit as well
- * as the checksum must both be set correctly for this
- * validation to pass.
- */
- e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
- if ((eeprom_data & 0x40) == 0) {
- eeprom_data |= 0x40;
- e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
- e1000_update_eeprom_checksum(hw);
- }
- }
-
- for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
- if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- checksum += eeprom_data;
- }
-
- if (checksum == (u16)EEPROM_SUM)
- return E1000_SUCCESS;
- else {
- DEBUGOUT("EEPROM Checksum Invalid\n");
- return -E1000_ERR_EEPROM;
- }
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+ for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+
+ if (checksum == (u16) EEPROM_SUM)
+ return E1000_SUCCESS;
+ else {
+ DEBUGOUT("EEPROM Checksum Invalid\n");
+ return -E1000_ERR_EEPROM;
+ }
}
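
The checksum rule is simple enough to demonstrate with a fabricated 64-word image: every word from 0x00 through 0x3F, including the checksum word itself, must sum to 0xBABA modulo 2^16. The image contents below are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define CHECKSUM_REG	0x3F	/* word 63 holds the checksum */
#define EEPROM_SUM	0xBABA

int main(void)
{
	uint16_t image[CHECKSUM_REG + 1] = { 0x1122, 0x3344, 0x5566 };
	uint16_t sum = 0;
	int i;

	/* compute and store the checksum word, then validate the image */
	for (i = 0; i < CHECKSUM_REG; i++)
		sum += image[i];
	image[CHECKSUM_REG] = (uint16_t)(EEPROM_SUM - sum);

	sum = 0;
	for (i = 0; i <= CHECKSUM_REG; i++)
		sum += image[i];
	printf("checksum %s\n", sum == EEPROM_SUM ? "valid" : "invalid");
	return 0;
}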
-/******************************************************************************
- * Calculates the EEPROM checksum and writes it to the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum
+ * @hw: Struct containing variables accessed by shared code
*
* Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
* Writes the difference to word offset 63 of the EEPROM.
- *****************************************************************************/
+ */
s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
{
- u32 ctrl_ext;
- u16 checksum = 0;
- u16 i, eeprom_data;
-
- DEBUGFUNC("e1000_update_eeprom_checksum");
-
- for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
- if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- checksum += eeprom_data;
- }
- checksum = (u16)EEPROM_SUM - checksum;
- if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
- DEBUGOUT("EEPROM Write Error\n");
- return -E1000_ERR_EEPROM;
- } else if (hw->eeprom.type == e1000_eeprom_flash) {
- e1000_commit_shadow_ram(hw);
- } else if (hw->eeprom.type == e1000_eeprom_ich8) {
- e1000_commit_shadow_ram(hw);
- /* Reload the EEPROM, or else modifications will not appear
- * until after next adapter reset. */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_EE_RST;
- ew32(CTRL_EXT, ctrl_ext);
- msleep(10);
- }
- return E1000_SUCCESS;
+ u16 checksum = 0;
+ u16 i, eeprom_data;
+
+ DEBUGFUNC("e1000_update_eeprom_checksum");
+
+ for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+ if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ checksum += eeprom_data;
+ }
+ checksum = (u16) EEPROM_SUM - checksum;
+ if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+ DEBUGOUT("EEPROM Write Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Parent function for writing words to the different EEPROM types.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset within the EEPROM to be written to
- * words - number of words to write
- * data - 16 bit word to be written to the EEPROM
+/**
+ * e1000_write_eeprom - write words to the different EEPROM types.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word to be written to the EEPROM
*
* If e1000_update_eeprom_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
- *****************************************************************************/
+ */
s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
- s32 ret;
- spin_lock(&e1000_eeprom_lock);
- ret = e1000_do_write_eeprom(hw, offset, words, data);
- spin_unlock(&e1000_eeprom_lock);
- return ret;
+ s32 ret;
+ spin_lock(&e1000_eeprom_lock);
+ ret = e1000_do_write_eeprom(hw, offset, words, data);
+ spin_unlock(&e1000_eeprom_lock);
+ return ret;
}
-
-static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- s32 status = 0;
-
- DEBUGFUNC("e1000_write_eeprom");
-
- /* If eeprom is not yet detected, do so now */
- if (eeprom->word_size == 0)
- e1000_init_eeprom_params(hw);
-
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
- */
- if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
- (words == 0)) {
- DEBUGOUT("\"words\" parameter out of bounds\n");
- return -E1000_ERR_EEPROM;
- }
-
- /* 82573 writes only through eewr */
- if (eeprom->use_eewr)
- return e1000_write_eeprom_eewr(hw, offset, words, data);
-
- if (eeprom->type == e1000_eeprom_ich8)
- return e1000_write_eeprom_ich8(hw, offset, words, data);
-
- /* Prepare the EEPROM for writing */
- if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
- return -E1000_ERR_EEPROM;
-
- if (eeprom->type == e1000_eeprom_microwire) {
- status = e1000_write_eeprom_microwire(hw, offset, words, data);
- } else {
- status = e1000_write_eeprom_spi(hw, offset, words, data);
- msleep(10);
- }
-
- /* Done with writing */
- e1000_release_eeprom(hw);
-
- return status;
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ s32 status = 0;
+
+ DEBUGFUNC("e1000_write_eeprom");
+
+ /* If eeprom is not yet detected, do so now */
+ if (eeprom->word_size == 0)
+ e1000_init_eeprom_params(hw);
+
+ /* A check for invalid values: offset too large, too many words, and not
+ * enough words.
+ */
+ if ((offset >= eeprom->word_size)
+ || (words > eeprom->word_size - offset) || (words == 0)) {
+ DEBUGOUT("\"words\" parameter out of bounds\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Prepare the EEPROM for writing */
+ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+ return -E1000_ERR_EEPROM;
+
+ if (eeprom->type == e1000_eeprom_microwire) {
+ status = e1000_write_eeprom_microwire(hw, offset, words, data);
+ } else {
+ status = e1000_write_eeprom_spi(hw, offset, words, data);
+ msleep(10);
+ }
+
+ /* Done with writing */
+ e1000_release_eeprom(hw);
+
+ return status;
}
-/******************************************************************************
- * Writes a 16 bit word to a given offset in an SPI EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset within the EEPROM to be written to
- * words - number of words to write
- * data - pointer to array of 8 bit words to be written to the EEPROM
- *
- *****************************************************************************/
+/**
+ * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u16 widx = 0;
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u16 widx = 0;
- DEBUGFUNC("e1000_write_eeprom_spi");
+ DEBUGFUNC("e1000_write_eeprom_spi");
- while (widx < words) {
- u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
+ while (widx < words) {
+ u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
- if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+ if (e1000_spi_eeprom_ready(hw))
+ return -E1000_ERR_EEPROM;
- e1000_standby_eeprom(hw);
+ e1000_standby_eeprom(hw);
- /* Send the WRITE ENABLE command (8 bit opcode ) */
- e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
- eeprom->opcode_bits);
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+ eeprom->opcode_bits);
- e1000_standby_eeprom(hw);
+ e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
- if ((eeprom->address_bits == 8) && (offset >= 128))
- write_opcode |= EEPROM_A8_OPCODE_SPI;
+ /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ if ((eeprom->address_bits == 8) && (offset >= 128))
+ write_opcode |= EEPROM_A8_OPCODE_SPI;
- /* Send the Write command (8-bit opcode + addr) */
- e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+ /* Send the Write command (8-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
- e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
- eeprom->address_bits);
+ e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
+ eeprom->address_bits);
- /* Send the data */
+ /* Send the data */
- /* Loop to allow for up to whole page write (32 bytes) of eeprom */
- while (widx < words) {
- u16 word_out = data[widx];
- word_out = (word_out >> 8) | (word_out << 8);
- e1000_shift_out_ee_bits(hw, word_out, 16);
- widx++;
+ /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ while (widx < words) {
+ u16 word_out = data[widx];
+ word_out = (word_out >> 8) | (word_out << 8);
+ e1000_shift_out_ee_bits(hw, word_out, 16);
+ widx++;
- /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
- * operation, while the smaller eeproms are capable of an 8-byte
- * PAGE WRITE operation. Break the inner loop to pass new address
- */
- if ((((offset + widx)*2) % eeprom->page_size) == 0) {
- e1000_standby_eeprom(hw);
- break;
- }
- }
- }
+ /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+ * operation, while the smaller eeproms are capable of an 8-byte
+ * PAGE WRITE operation. Break the inner loop to pass new address
+ */
+ if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
+ e1000_standby_eeprom(hw);
+ break;
+ }
+ }
+ }
- return E1000_SUCCESS;
+ return E1000_SUCCESS;
}
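
The inner-loop exit above is a page-boundary check: a burst must be restarted whenever the next byte address lands on a multiple of the part's page size. A sketch assuming a 32-byte page; the real page_size comes from the EEPROM parameter setup.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t page_size = 32;	/* assumed bytes per SPI page write */
	uint16_t offset = 14;		/* starting word offset */
	uint16_t widx;

	for (widx = 1; widx <= 8; widx++) {
		uint16_t byte_addr = (uint16_t)((offset + widx) * 2);

		if ((byte_addr % page_size) == 0)
			printf("restart burst before word %u (byte 0x%02X)\n",
			       (unsigned)widx, (unsigned)byte_addr);
	}
	return 0;
}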
-/******************************************************************************
- * Writes a 16 bit word to a given offset in a Microwire EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset within the EEPROM to be written to
- * words - number of words to write
- * data - pointer to array of 16 bit words to be written to the EEPROM
- *
- *****************************************************************************/
+/**
+ * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: pointer to array of 16 bit words to be written to the EEPROM
+ */
static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data)
{
- struct e1000_eeprom_info *eeprom = &hw->eeprom;
- u32 eecd;
- u16 words_written = 0;
- u16 i = 0;
-
- DEBUGFUNC("e1000_write_eeprom_microwire");
-
- /* Send the write enable command to the EEPROM (3-bit opcode plus
- * 6/8-bit dummy address beginning with 11). It's less work to include
- * the 11 of the dummy address as part of the opcode than it is to shift
- * it over the correct number of bits for the address. This puts the
- * EEPROM into write/erase mode.
- */
- e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
- (u16)(eeprom->opcode_bits + 2));
-
- e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
-
- /* Prepare the EEPROM */
- e1000_standby_eeprom(hw);
-
- while (words_written < words) {
- /* Send the Write command (3-bit opcode + addr) */
- e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
- eeprom->opcode_bits);
-
- e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
- eeprom->address_bits);
-
- /* Send the data */
- e1000_shift_out_ee_bits(hw, data[words_written], 16);
-
- /* Toggle the CS line. This in effect tells the EEPROM to execute
- * the previous command.
- */
- e1000_standby_eeprom(hw);
-
- /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
- */
- for (i = 0; i < 200; i++) {
- eecd = er32(EECD);
- if (eecd & E1000_EECD_DO) break;
- udelay(50);
- }
- if (i == 200) {
- DEBUGOUT("EEPROM Write did not complete\n");
- return -E1000_ERR_EEPROM;
- }
-
- /* Recover from write */
- e1000_standby_eeprom(hw);
-
- words_written++;
- }
-
- /* Send the write disable command to the EEPROM (3-bit opcode plus
- * 6/8-bit dummy address beginning with 10). It's less work to include
- * the 10 of the dummy address as part of the opcode than it is to shift
- * it over the correct number of bits for the address. This takes the
- * EEPROM out of write/erase mode.
- */
- e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
- (u16)(eeprom->opcode_bits + 2));
-
- e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
-
- return E1000_SUCCESS;
-}
+ struct e1000_eeprom_info *eeprom = &hw->eeprom;
+ u32 eecd;
+ u16 words_written = 0;
+ u16 i = 0;
-/******************************************************************************
- * Flushes the cached eeprom to NVM. This is done by saving the modified values
- * in the eeprom cache and the non modified values in the currently active bank
- * to the new bank.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_commit_shadow_ram(struct e1000_hw *hw)
-{
- u32 attempts = 100000;
- u32 eecd = 0;
- u32 flop = 0;
- u32 i = 0;
- s32 error = E1000_SUCCESS;
- u32 old_bank_offset = 0;
- u32 new_bank_offset = 0;
- u8 low_byte = 0;
- u8 high_byte = 0;
- bool sector_write_failed = false;
-
- if (hw->mac_type == e1000_82573) {
- /* The flop register will be used to determine if flash type is STM */
- flop = er32(FLOP);
- for (i=0; i < attempts; i++) {
- eecd = er32(EECD);
- if ((eecd & E1000_EECD_FLUPD) == 0) {
- break;
- }
- udelay(5);
- }
-
- if (i == attempts) {
- return -E1000_ERR_EEPROM;
- }
-
- /* If STM opcode located in bits 15:8 of flop, reset firmware */
- if ((flop & 0xFF00) == E1000_STM_OPCODE) {
- ew32(HICR, E1000_HICR_FW_RESET);
- }
-
- /* Perform the flash update */
- ew32(EECD, eecd | E1000_EECD_FLUPD);
-
- for (i=0; i < attempts; i++) {
- eecd = er32(EECD);
- if ((eecd & E1000_EECD_FLUPD) == 0) {
- break;
- }
- udelay(5);
- }
-
- if (i == attempts) {
- return -E1000_ERR_EEPROM;
- }
- }
-
- if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
- /* We're writing to the opposite bank so if we're on bank 1,
- * write to bank 0 etc. We also need to erase the segment that
- * is going to be written */
- if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
- new_bank_offset = hw->flash_bank_size * 2;
- old_bank_offset = 0;
- e1000_erase_ich8_4k_segment(hw, 1);
- } else {
- old_bank_offset = hw->flash_bank_size * 2;
- new_bank_offset = 0;
- e1000_erase_ich8_4k_segment(hw, 0);
- }
-
- sector_write_failed = false;
- /* Loop for every byte in the shadow RAM,
- * which is in units of words. */
- for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
- /* Determine whether to write the value stored
- * in the other NVM bank or a modified value stored
- * in the shadow RAM */
- if (hw->eeprom_shadow_ram[i].modified) {
- low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
- udelay(100);
- error = e1000_verify_write_ich8_byte(hw,
- (i << 1) + new_bank_offset, low_byte);
-
- if (error != E1000_SUCCESS)
- sector_write_failed = true;
- else {
- high_byte =
- (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
- udelay(100);
- }
- } else {
- e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
- &low_byte);
- udelay(100);
- error = e1000_verify_write_ich8_byte(hw,
- (i << 1) + new_bank_offset, low_byte);
-
- if (error != E1000_SUCCESS)
- sector_write_failed = true;
- else {
- e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
- &high_byte);
- udelay(100);
- }
- }
-
- /* If the write of the low byte was successful, go ahead and
- * write the high byte while checking to make sure that if it
- * is the signature byte, then it is handled properly */
- if (!sector_write_failed) {
- /* If the word is 0x13, then make sure the signature bits
- * (15:14) are 11b until the commit has completed.
- * This will allow us to write 10b which indicates the
- * signature is valid. We want to do this after the write
- * has completed so that we don't mark the segment valid
- * while the write is still in progress */
- if (i == E1000_ICH_NVM_SIG_WORD)
- high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
-
- error = e1000_verify_write_ich8_byte(hw,
- (i << 1) + new_bank_offset + 1, high_byte);
- if (error != E1000_SUCCESS)
- sector_write_failed = true;
-
- } else {
- /* If the write failed then break from the loop and
- * return an error */
- break;
- }
- }
-
- /* Don't bother writing the segment valid bits if sector
- * programming failed. */
- if (!sector_write_failed) {
- /* Finally validate the new segment by setting bit 15:14
- * to 10b in word 0x13 , this can be done without an
- * erase as well since these bits are 11 to start with
- * and we need to change bit 14 to 0b */
- e1000_read_ich8_byte(hw,
- E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
- &high_byte);
- high_byte &= 0xBF;
- error = e1000_verify_write_ich8_byte(hw,
- E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
- /* And invalidate the previously valid segment by setting
- * its signature word (0x13) high_byte to 0b. This can be
- * done without an erase because flash erase sets all bits
- * to 1's. We can write 1's to 0's without an erase */
- if (error == E1000_SUCCESS) {
- error = e1000_verify_write_ich8_byte(hw,
- E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
- }
-
- /* Clear the now not used entry in the cache */
- for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
- hw->eeprom_shadow_ram[i].modified = false;
- hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
- }
- }
- }
-
- return error;
+ DEBUGFUNC("e1000_write_eeprom_microwire");
+
+ /* Send the write enable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 11). It's less work to include
+ * the 11 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This puts the
+ * EEPROM into write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ /* Prepare the EEPROM */
+ e1000_standby_eeprom(hw);
+
+ while (words_written < words) {
+ /* Send the Write command (3-bit opcode + addr) */
+ e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+ eeprom->opcode_bits);
+
+ e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
+ eeprom->address_bits);
+
+ /* Send the data */
+ e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+ /* Toggle the CS line. This in effect tells the EEPROM to execute
+ * the previous command.
+ */
+ e1000_standby_eeprom(hw);
+
+ /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
+ * signal that the command has been completed by raising the DO signal.
+ * If DO does not go high in 10 milliseconds, then error out.
+ */
+ for (i = 0; i < 200; i++) {
+ eecd = er32(EECD);
+ if (eecd & E1000_EECD_DO)
+ break;
+ udelay(50);
+ }
+ if (i == 200) {
+ DEBUGOUT("EEPROM Write did not complete\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ /* Recover from write */
+ e1000_standby_eeprom(hw);
+
+ words_written++;
+ }
+
+ /* Send the write disable command to the EEPROM (3-bit opcode plus
+ * 6/8-bit dummy address beginning with 10). It's less work to include
+ * the 10 of the dummy address as part of the opcode than it is to shift
+ * it over the correct number of bits for the address. This takes the
+ * EEPROM out of write/erase mode.
+ */
+ e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+ (u16) (eeprom->opcode_bits + 2));
+
+ e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+
+ return E1000_SUCCESS;
}
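
How the write-enable command above is framed: the "11" of the dummy address rides along with the 3-bit opcode, then the remaining address bits go out as zeros. The 0x13 opcode value below is an assumption taken from the driver's header; only the framing is the point of this sketch.

#include <stdio.h>
#include <stdint.h>

static void print_bits(uint16_t value, uint16_t count)
{
	uint16_t mask = 0x01 << (count - 1);

	do {
		putchar((value & mask) ? '1' : '0');
		mask >>= 1;
	} while (mask);
}

int main(void)
{
	uint16_t ewen_opcode = 0x13;	/* assumed: 3-bit opcode 100 plus "11" prefix */
	uint16_t opcode_bits = 3, address_bits = 6;

	print_bits(ewen_opcode, (uint16_t)(opcode_bits + 2));	/* 10011 */
	print_bits(0, (uint16_t)(address_bits - 2));		/* 0000  */
	putchar('\n');
	return 0;
}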
-/******************************************************************************
+/**
+ * e1000_read_mac_addr - read the adapter's MAC address from the EEPROM
+ * @hw: Struct containing variables accessed by shared code
+ *
* Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
* second function of dual function devices
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ */
s32 e1000_read_mac_addr(struct e1000_hw *hw)
{
- u16 offset;
- u16 eeprom_data, i;
-
- DEBUGFUNC("e1000_read_mac_addr");
-
- for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
- offset = i >> 1;
- if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
- hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
- hw->perm_mac_addr[i+1] = (u8)(eeprom_data >> 8);
- }
-
- switch (hw->mac_type) {
- default:
- break;
- case e1000_82546:
- case e1000_82546_rev_3:
- case e1000_82571:
- case e1000_80003es2lan:
- if (er32(STATUS) & E1000_STATUS_FUNC_1)
- hw->perm_mac_addr[5] ^= 0x01;
- break;
- }
-
- for (i = 0; i < NODE_ADDRESS_SIZE; i++)
- hw->mac_addr[i] = hw->perm_mac_addr[i];
- return E1000_SUCCESS;
+ u16 offset;
+ u16 eeprom_data, i;
+
+ DEBUGFUNC("e1000_read_mac_addr");
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+ offset = i >> 1;
+ if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+ hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
+ hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82546:
+ case e1000_82546_rev_3:
+ if (er32(STATUS) & E1000_STATUS_FUNC_1)
+ hw->perm_mac_addr[5] ^= 0x01;
+ break;
+ }
+
+ for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return E1000_SUCCESS;
}
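
How the loop above unpacks the little-endian EEPROM words into the six MAC bytes, and how the last byte's LSB is flipped for the second port of a dual-port device, shown with made-up EEPROM contents:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t ee_words[3] = { 0x2100, 0x6543, 0xA987 };	/* sample content */
	uint8_t mac[6];
	int i, is_second_port = 1;

	for (i = 0; i < 6; i += 2) {
		mac[i]     = (uint8_t)(ee_words[i >> 1] & 0x00FF);	/* low byte first */
		mac[i + 1] = (uint8_t)(ee_words[i >> 1] >> 8);
	}
	if (is_second_port)
		mac[5] ^= 0x01;		/* second function gets the LSB flipped */

	printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}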
-/******************************************************************************
- * Initializes receive address filters.
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_init_rx_addrs - Initializes receive address filters.
+ * @hw: Struct containing variables accessed by shared code
*
* Places the MAC address in receive address register 0 and clears the rest
- * of the receive addresss registers. Clears the multicast table. Assumes
+ * of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
- *****************************************************************************/
+ */
static void e1000_init_rx_addrs(struct e1000_hw *hw)
{
- u32 i;
- u32 rar_num;
-
- DEBUGFUNC("e1000_init_rx_addrs");
-
- /* Setup the receive address. */
- DEBUGOUT("Programming MAC Address into RAR[0]\n");
-
- e1000_rar_set(hw, hw->mac_addr, 0);
-
- rar_num = E1000_RAR_ENTRIES;
-
- /* Reserve a spot for the Locally Administered Address to work around
- * an 82571 issue in which a reset on one port will reload the MAC on
- * the other port. */
- if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
- rar_num -= 1;
- if (hw->mac_type == e1000_ich8lan)
- rar_num = E1000_RAR_ENTRIES_ICH8LAN;
-
- /* Zero out the other 15 receive addresses. */
- DEBUGOUT("Clearing RAR[1-15]\n");
- for (i = 1; i < rar_num; i++) {
- E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- E1000_WRITE_FLUSH();
- E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- E1000_WRITE_FLUSH();
- }
+ u32 i;
+ u32 rar_num;
+
+ DEBUGFUNC("e1000_init_rx_addrs");
+
+ /* Setup the receive address. */
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+ e1000_rar_set(hw, hw->mac_addr, 0);
+
+ rar_num = E1000_RAR_ENTRIES;
+
+ /* Zero out the other 15 receive addresses. */
+ DEBUGOUT("Clearing RAR[1-15]\n");
+ for (i = 1; i < rar_num; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+ E1000_WRITE_FLUSH();
+ }
}
-/******************************************************************************
- * Hashes an address to determine its location in the multicast table
- *
- * hw - Struct containing variables accessed by shared code
- * mc_addr - the multicast address to hash
- *****************************************************************************/
+/**
+ * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table
+ * @hw: Struct containing variables accessed by shared code
+ * @mc_addr: the multicast address to hash
+ */
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
- u32 hash_value = 0;
-
- /* The portion of the address that is used for the hash table is
- * determined by the mc_filter_type setting.
- */
- switch (hw->mc_filter_type) {
- /* [0] [1] [2] [3] [4] [5]
- * 01 AA 00 12 34 56
- * LSB MSB
- */
- case 0:
- if (hw->mac_type == e1000_ich8lan) {
- /* [47:38] i.e. 0x158 for above example address */
- hash_value = ((mc_addr[4] >> 6) | (((u16)mc_addr[5]) << 2));
- } else {
- /* [47:36] i.e. 0x563 for above example address */
- hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
- }
- break;
- case 1:
- if (hw->mac_type == e1000_ich8lan) {
- /* [46:37] i.e. 0x2B1 for above example address */
- hash_value = ((mc_addr[4] >> 5) | (((u16)mc_addr[5]) << 3));
- } else {
- /* [46:35] i.e. 0xAC6 for above example address */
- hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
- }
- break;
- case 2:
- if (hw->mac_type == e1000_ich8lan) {
- /*[45:36] i.e. 0x163 for above example address */
- hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
- } else {
- /* [45:34] i.e. 0x5D8 for above example address */
- hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
- }
- break;
- case 3:
- if (hw->mac_type == e1000_ich8lan) {
- /* [43:34] i.e. 0x18D for above example address */
- hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
- } else {
- /* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
- }
- break;
- }
-
- hash_value &= 0xFFF;
- if (hw->mac_type == e1000_ich8lan)
- hash_value &= 0x3FF;
-
- return hash_value;
+ u32 hash_value = 0;
+
+ /* The portion of the address that is used for the hash table is
+ * determined by the mc_filter_type setting.
+ */
+ switch (hw->mc_filter_type) {
+ /* [0] [1] [2] [3] [4] [5]
+ * 01 AA 00 12 34 56
+ * LSB MSB
+ */
+ case 0:
+ /* [47:36] i.e. 0x563 for above example address */
+ hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+ break;
+ case 1:
+ /* [46:35] i.e. 0xAC6 for above example address */
+ hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+ break;
+ case 2:
+ /* [45:34] i.e. 0x5D8 for above example address */
+ hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+ break;
+ case 3:
+ /* [43:32] i.e. 0x634 for above example address */
+ hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+ break;
+ }
+
+ hash_value &= 0xFFF;
+ return hash_value;
}
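
Only the mc_filter_type == 0 case, reproduced standalone to confirm the 0x563 value quoted in the comment for the example address 01:AA:00:12:34:56:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
	unsigned int hash;

	/* bits [47:36] of the address, i.e. its top 12 bits */
	hash = ((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4)) & 0xFFF;
	printf("hash = 0x%03X\n", hash);	/* 0x563 */
	return 0;
}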
-/******************************************************************************
- * Puts an ethernet address into a receive address register.
- *
- * hw - Struct containing variables accessed by shared code
- * addr - Address to put into receive address register
- * index - Receive address register to write
- *****************************************************************************/
+/**
+ * e1000_rar_set - Puts an ethernet address into a receive address register.
+ * @hw: Struct containing variables accessed by shared code
+ * @addr: Address to put into receive address register
+ * @index: Receive address register to write
+ */
void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
- u32 rar_low, rar_high;
-
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
- rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
-
- /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
- * unit hang.
- *
- * Description:
- * If there are any Rx frames queued up or otherwise present in the HW
- * before RSS is enabled, and then we enable RSS, the HW Rx unit will
- * hang. To work around this issue, we have to disable receives and
- * flush out all Rx frames before we enable RSS. To do so, we modify we
- * redirect all Rx traffic to manageability and then reset the HW.
- * This flushes away Rx frames, and (since the redirections to
- * manageability persists across resets) keeps new ones from coming in
- * while we work. Then, we clear the Address Valid AV bit for all MAC
- * addresses and undo the re-direction to manageability.
- * Now, frames are coming in again, but the MAC won't accept them, so
- * far so good. We now proceed to initialize RSS (if necessary) and
- * configure the Rx unit. Last, we re-enable the AV bits and continue
- * on our merry way.
- */
- switch (hw->mac_type) {
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- if (hw->leave_av_bit_off)
- break;
- default:
- /* Indicate to hardware the Address is Valid. */
- rar_high |= E1000_RAH_AV;
- break;
- }
-
- E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
- E1000_WRITE_FLUSH();
- E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
- E1000_WRITE_FLUSH();
+ u32 rar_low, rar_high;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+ * unit hang.
+ *
+ * Description:
+ * If there are any Rx frames queued up or otherwise present in the HW
+ * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+ * hang. To work around this issue, we have to disable receives and
+	 * flush out all Rx frames before we enable RSS. To do so, we redirect
+	 * all Rx traffic to manageability and then reset the HW.
+	 * This flushes away Rx frames, and (since the redirection to
+	 * manageability persists across resets) keeps new ones from coming in
+ * while we work. Then, we clear the Address Valid AV bit for all MAC
+ * addresses and undo the re-direction to manageability.
+ * Now, frames are coming in again, but the MAC won't accept them, so
+ * far so good. We now proceed to initialize RSS (if necessary) and
+ * configure the Rx unit. Last, we re-enable the AV bits and continue
+ * on our merry way.
+ */
+ switch (hw->mac_type) {
+ default:
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+ break;
+ }
+
+ E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+ E1000_WRITE_FLUSH();
}
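
The packing done above, pulled out into a sketch: the network-order MAC bytes fold into a little-endian low/high register pair, with the Address Valid bit placed in the high word. The MAC address below is arbitrary.

#include <stdio.h>
#include <stdint.h>

#define RAH_AV	0x80000000u	/* Address Valid bit in the high word */

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
	uint32_t rar_low, rar_high;

	rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		  ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	rar_high = ((uint32_t)addr[4] | ((uint32_t)addr[5] << 8)) | RAH_AV;

	printf("RAL = 0x%08X, RAH = 0x%08X\n",
	       (unsigned)rar_low, (unsigned)rar_high);
	return 0;
}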
-/******************************************************************************
- * Writes a value to the specified offset in the VLAN filter table.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filer table to write
- * value - Value to write into VLAN filter table
- *****************************************************************************/
+/**
+ * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: Offset in VLAN filter table to write
+ * @value: Value to write into VLAN filter table
+ */
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
- u32 temp;
-
- if (hw->mac_type == e1000_ich8lan)
- return;
-
- if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
- temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
- E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- E1000_WRITE_FLUSH();
- E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
- E1000_WRITE_FLUSH();
- } else {
- E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
- E1000_WRITE_FLUSH();
- }
+ u32 temp;
+
+ if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+ temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+ E1000_WRITE_FLUSH();
+ } else {
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+ E1000_WRITE_FLUSH();
+ }
}
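
A caller normally derives the table offset and bit position from the 12-bit VLAN ID before calling e1000_write_vfta(). A minimal sketch of that mapping, assuming the driver's register macros are in scope; the helper itself is illustrative:

	/* Illustrative: set the filter bit for one VLAN ID.  The 4096-bit table
	 * is laid out as 128 32-bit registers, so bits 11:5 of the VLAN ID pick
	 * the register and bits 4:0 pick the bit within it. */
	static void vfta_set_vid(struct e1000_hw *hw, u16 vid)
	{
		u32 index = (vid >> 5) & 0x7F;
		u32 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);

		vfta |= 1U << (vid & 0x1F);
		e1000_write_vfta(hw, index, vfta);
	}
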
-/******************************************************************************
- * Clears the VLAN filer table
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_clear_vfta - Clears the VLAN filter table
+ * @hw: Struct containing variables accessed by shared code
+ */
static void e1000_clear_vfta(struct e1000_hw *hw)
{
- u32 offset;
- u32 vfta_value = 0;
- u32 vfta_offset = 0;
- u32 vfta_bit_in_reg = 0;
-
- if (hw->mac_type == e1000_ich8lan)
- return;
-
- if (hw->mac_type == e1000_82573) {
- if (hw->mng_cookie.vlan_id != 0) {
- /* The VFTA is a 4096b bit-field, each identifying a single VLAN
- * ID. The following operations determine which 32b entry
- * (i.e. offset) into the array we want to set the VLAN ID
- * (i.e. bit) of the manageability unit. */
- vfta_offset = (hw->mng_cookie.vlan_id >>
- E1000_VFTA_ENTRY_SHIFT) &
- E1000_VFTA_ENTRY_MASK;
- vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
- }
- }
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /* If the offset we want to clear is the same offset of the
- * manageability VLAN ID, then clear all bits except that of the
- * manageability unit */
- vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
- E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
- E1000_WRITE_FLUSH();
- }
+ u32 offset;
+ u32 vfta_value = 0;
+ u32 vfta_offset = 0;
+ u32 vfta_bit_in_reg = 0;
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ /* If the offset we want to clear is the same offset of the
+ * manageability VLAN ID, then clear all bits except that of the
+ * manageability unit */
+ vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_FLUSH();
+ }
}
static s32 e1000_id_led_init(struct e1000_hw *hw)
{
- u32 ledctl;
- const u32 ledctl_mask = 0x000000FF;
- const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
- const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
- u16 eeprom_data, i, temp;
- const u16 led_mask = 0x0F;
-
- DEBUGFUNC("e1000_id_led_init");
-
- if (hw->mac_type < e1000_82540) {
- /* Nothing to do */
- return E1000_SUCCESS;
- }
-
- ledctl = er32(LEDCTL);
- hw->ledctl_default = ledctl;
- hw->ledctl_mode1 = hw->ledctl_default;
- hw->ledctl_mode2 = hw->ledctl_default;
-
- if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
- DEBUGOUT("EEPROM Read Error\n");
- return -E1000_ERR_EEPROM;
- }
-
- if ((hw->mac_type == e1000_82573) &&
- (eeprom_data == ID_LED_RESERVED_82573))
- eeprom_data = ID_LED_DEFAULT_82573;
- else if ((eeprom_data == ID_LED_RESERVED_0000) ||
- (eeprom_data == ID_LED_RESERVED_FFFF)) {
- if (hw->mac_type == e1000_ich8lan)
- eeprom_data = ID_LED_DEFAULT_ICH8LAN;
- else
- eeprom_data = ID_LED_DEFAULT;
- }
-
- for (i = 0; i < 4; i++) {
- temp = (eeprom_data >> (i << 2)) & led_mask;
- switch (temp) {
- case ID_LED_ON1_DEF2:
- case ID_LED_ON1_ON2:
- case ID_LED_ON1_OFF2:
- hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
- hw->ledctl_mode1 |= ledctl_on << (i << 3);
- break;
- case ID_LED_OFF1_DEF2:
- case ID_LED_OFF1_ON2:
- case ID_LED_OFF1_OFF2:
- hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
- hw->ledctl_mode1 |= ledctl_off << (i << 3);
- break;
- default:
- /* Do nothing */
- break;
- }
- switch (temp) {
- case ID_LED_DEF1_ON2:
- case ID_LED_ON1_ON2:
- case ID_LED_OFF1_ON2:
- hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
- hw->ledctl_mode2 |= ledctl_on << (i << 3);
- break;
- case ID_LED_DEF1_OFF2:
- case ID_LED_ON1_OFF2:
- case ID_LED_OFF1_OFF2:
- hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
- hw->ledctl_mode2 |= ledctl_off << (i << 3);
- break;
- default:
- /* Do nothing */
- break;
- }
- }
- return E1000_SUCCESS;
+ u32 ledctl;
+ const u32 ledctl_mask = 0x000000FF;
+ const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+ const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+ u16 eeprom_data, i, temp;
+ const u16 led_mask = 0x0F;
+
+ DEBUGFUNC("e1000_id_led_init");
+
+ if (hw->mac_type < e1000_82540) {
+ /* Nothing to do */
+ return E1000_SUCCESS;
+ }
+
+ ledctl = er32(LEDCTL);
+ hw->ledctl_default = ledctl;
+ hw->ledctl_mode1 = hw->ledctl_default;
+ hw->ledctl_mode2 = hw->ledctl_default;
+
+ if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+ DEBUGOUT("EEPROM Read Error\n");
+ return -E1000_ERR_EEPROM;
+ }
+
+ if ((eeprom_data == ID_LED_RESERVED_0000) ||
+ (eeprom_data == ID_LED_RESERVED_FFFF)) {
+ eeprom_data = ID_LED_DEFAULT;
+ }
+
+ for (i = 0; i < 4; i++) {
+ temp = (eeprom_data >> (i << 2)) & led_mask;
+ switch (temp) {
+ case ID_LED_ON1_DEF2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_ON1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_OFF1_DEF2:
+ case ID_LED_OFF1_ON2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode1 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ switch (temp) {
+ case ID_LED_DEF1_ON2:
+ case ID_LED_ON1_ON2:
+ case ID_LED_OFF1_ON2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_on << (i << 3);
+ break;
+ case ID_LED_DEF1_OFF2:
+ case ID_LED_ON1_OFF2:
+ case ID_LED_OFF1_OFF2:
+ hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+ hw->ledctl_mode2 |= ledctl_off << (i << 3);
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ return E1000_SUCCESS;
}
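
Each 4-bit field of the EEPROM word selects one LED's behaviour while each 8-bit field of LEDCTL holds that LED's mode, which is why the loop above shifts by (i << 2) for the EEPROM nibble and (i << 3) for the LEDCTL byte. A minimal sketch of the byte update the loop performs; the helper is illustrative only:

	/* Illustrative: install a new mode for LED 'i' (0-3) in a LEDCTL image. */
	static u32 apply_led_mode(u32 ledctl, int i, u32 mode)
	{
		ledctl &= ~(0x000000FFUL << (i << 3));	/* clear LED i's byte */
		ledctl |= mode << (i << 3);	/* e.g. E1000_LEDCTL_MODE_LED_ON */
		return ledctl;
	}
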
-/******************************************************************************
- * Prepares SW controlable LED for use and saves the current state of the LED.
+/**
+ * e1000_setup_led
+ * @hw: Struct containing variables accessed by shared code
*
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ * Prepares SW controllable LED for use and saves the current state of the LED.
+ */
s32 e1000_setup_led(struct e1000_hw *hw)
{
- u32 ledctl;
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_setup_led");
-
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- case e1000_82543:
- case e1000_82544:
- /* No setup necessary */
- break;
- case e1000_82541:
- case e1000_82547:
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- /* Turn off PHY Smart Power Down (if enabled) */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
- &hw->phy_spd_default);
- if (ret_val)
- return ret_val;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
- (u16)(hw->phy_spd_default &
- ~IGP01E1000_GMII_SPD));
- if (ret_val)
- return ret_val;
- /* Fall Through */
- default:
- if (hw->media_type == e1000_media_type_fiber) {
- ledctl = er32(LEDCTL);
- /* Save current LEDCTL settings */
- hw->ledctl_default = ledctl;
- /* Turn off LED0 */
- ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
- E1000_LEDCTL_LED0_BLINK |
- E1000_LEDCTL_LED0_MODE_MASK);
- ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
- E1000_LEDCTL_LED0_MODE_SHIFT);
- ew32(LEDCTL, ledctl);
- } else if (hw->media_type == e1000_media_type_copper)
- ew32(LEDCTL, hw->ledctl_mode1);
- break;
- }
-
- return E1000_SUCCESS;
-}
+ u32 ledctl;
+ s32 ret_val = E1000_SUCCESS;
+ DEBUGFUNC("e1000_setup_led");
-/******************************************************************************
- * Used on 82571 and later Si that has LED blink bits.
- * Callers must use their own timer and should have already called
- * e1000_id_led_init()
- * Call e1000_cleanup led() to stop blinking
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-s32 e1000_blink_led_start(struct e1000_hw *hw)
-{
- s16 i;
- u32 ledctl_blink = 0;
-
- DEBUGFUNC("e1000_id_led_blink_on");
-
- if (hw->mac_type < e1000_82571) {
- /* Nothing to do */
- return E1000_SUCCESS;
- }
- if (hw->media_type == e1000_media_type_fiber) {
- /* always blink LED0 for PCI-E fiber */
- ledctl_blink = E1000_LEDCTL_LED0_BLINK |
- (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
- } else {
- /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
- ledctl_blink = hw->ledctl_mode2;
- for (i=0; i < 4; i++)
- if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
- E1000_LEDCTL_MODE_LED_ON)
- ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
- }
-
- ew32(LEDCTL, ledctl_blink);
-
- return E1000_SUCCESS;
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No setup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn off PHY Smart Power Down (if enabled) */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ &hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ (u16) (hw->phy_spd_default &
+ ~IGP01E1000_GMII_SPD));
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ ledctl = er32(LEDCTL);
+ /* Save current LEDCTL settings */
+ hw->ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+ E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ ew32(LEDCTL, ledctl);
+ } else if (hw->media_type == e1000_media_type_copper)
+ ew32(LEDCTL, hw->ledctl_mode1);
+ break;
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Restores the saved state of the SW controlable LED.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_cleanup_led - Restores the saved state of the SW controllable LED.
+ * @hw: Struct containing variables accessed by shared code
+ */
s32 e1000_cleanup_led(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
-
- DEBUGFUNC("e1000_cleanup_led");
-
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- case e1000_82543:
- case e1000_82544:
- /* No cleanup necessary */
- break;
- case e1000_82541:
- case e1000_82547:
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- /* Turn on PHY Smart Power Down (if previously enabled) */
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
- hw->phy_spd_default);
- if (ret_val)
- return ret_val;
- /* Fall Through */
- default:
- if (hw->phy_type == e1000_phy_ife) {
- e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
- break;
- }
- /* Restore LEDCTL settings */
- ew32(LEDCTL, hw->ledctl_default);
- break;
- }
-
- return E1000_SUCCESS;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_cleanup_led");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ case e1000_82544:
+ /* No cleanup necessary */
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ /* Turn on PHY Smart Power Down (if previously enabled) */
+ ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ hw->phy_spd_default);
+ if (ret_val)
+ return ret_val;
+ /* Fall Through */
+ default:
+ /* Restore LEDCTL settings */
+ ew32(LEDCTL, hw->ledctl_default);
+ break;
+ }
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Turns on the software controllable LED
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_led_on - Turns on the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
s32 e1000_led_on(struct e1000_hw *hw)
{
- u32 ctrl = er32(CTRL);
-
- DEBUGFUNC("e1000_led_on");
-
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- case e1000_82543:
- /* Set SW Defineable Pin 0 to turn on the LED */
- ctrl |= E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- break;
- case e1000_82544:
- if (hw->media_type == e1000_media_type_fiber) {
- /* Set SW Defineable Pin 0 to turn on the LED */
- ctrl |= E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- } else {
- /* Clear SW Defineable Pin 0 to turn on the LED */
- ctrl &= ~E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- }
- break;
- default:
- if (hw->media_type == e1000_media_type_fiber) {
- /* Clear SW Defineable Pin 0 to turn on the LED */
- ctrl &= ~E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- } else if (hw->phy_type == e1000_phy_ife) {
- e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
- (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
- } else if (hw->media_type == e1000_media_type_copper) {
- ew32(LEDCTL, hw->ledctl_mode2);
- return E1000_SUCCESS;
- }
- break;
- }
-
- ew32(CTRL, ctrl);
-
- return E1000_SUCCESS;
+ u32 ctrl = er32(CTRL);
+
+ DEBUGFUNC("e1000_led_on");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Set SW Defineable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Defineable Pin 0 to turn on the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Clear SW Defineable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Defineable Pin 0 to turn on the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode2);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
}
-/******************************************************************************
- * Turns off the software controllable LED
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_led_off - Turns off the software controllable LED
+ * @hw: Struct containing variables accessed by shared code
+ */
s32 e1000_led_off(struct e1000_hw *hw)
{
- u32 ctrl = er32(CTRL);
-
- DEBUGFUNC("e1000_led_off");
-
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- case e1000_82543:
- /* Clear SW Defineable Pin 0 to turn off the LED */
- ctrl &= ~E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- break;
- case e1000_82544:
- if (hw->media_type == e1000_media_type_fiber) {
- /* Clear SW Defineable Pin 0 to turn off the LED */
- ctrl &= ~E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- } else {
- /* Set SW Defineable Pin 0 to turn off the LED */
- ctrl |= E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- }
- break;
- default:
- if (hw->media_type == e1000_media_type_fiber) {
- /* Set SW Defineable Pin 0 to turn off the LED */
- ctrl |= E1000_CTRL_SWDPIN0;
- ctrl |= E1000_CTRL_SWDPIO0;
- } else if (hw->phy_type == e1000_phy_ife) {
- e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
- (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
- } else if (hw->media_type == e1000_media_type_copper) {
- ew32(LEDCTL, hw->ledctl_mode1);
- return E1000_SUCCESS;
- }
- break;
- }
-
- ew32(CTRL, ctrl);
-
- return E1000_SUCCESS;
+ u32 ctrl = er32(CTRL);
+
+ DEBUGFUNC("e1000_led_off");
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ case e1000_82543:
+ /* Clear SW Defineable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ break;
+ case e1000_82544:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Clear SW Defineable Pin 0 to turn off the LED */
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else {
+ /* Set SW Defineable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ }
+ break;
+ default:
+ if (hw->media_type == e1000_media_type_fiber) {
+ /* Set SW Defineable Pin 0 to turn off the LED */
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ } else if (hw->media_type == e1000_media_type_copper) {
+ ew32(LEDCTL, hw->ledctl_mode1);
+ return E1000_SUCCESS;
+ }
+ break;
+ }
+
+ ew32(CTRL, ctrl);
+
+ return E1000_SUCCESS;
}
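
Taken together, e1000_setup_led(), e1000_led_on()/e1000_led_off() and e1000_cleanup_led() form a simple identify-blink sequence. A minimal sketch of such a caller, assuming process context and <linux/delay.h> for msleep(); the helper is hypothetical (the real driver blinks from its ethtool phys_id path):

	/* Hypothetical blink helper built on the routines above. */
	static void e1000_identify_blink(struct e1000_hw *hw, int count)
	{
		int i;

		if (e1000_setup_led(hw) != E1000_SUCCESS)
			return;
		for (i = 0; i < count; i++) {
			e1000_led_on(hw);
			msleep(500);
			e1000_led_off(hw);
			msleep(500);
		}
		e1000_cleanup_led(hw);	/* restore the saved LEDCTL state */
	}
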
-/******************************************************************************
- * Clears all hardware statistics counters.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+/**
+ * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
+ * @hw: Struct containing variables accessed by shared code
+ */
static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
{
- volatile u32 temp;
-
- temp = er32(CRCERRS);
- temp = er32(SYMERRS);
- temp = er32(MPC);
- temp = er32(SCC);
- temp = er32(ECOL);
- temp = er32(MCC);
- temp = er32(LATECOL);
- temp = er32(COLC);
- temp = er32(DC);
- temp = er32(SEC);
- temp = er32(RLEC);
- temp = er32(XONRXC);
- temp = er32(XONTXC);
- temp = er32(XOFFRXC);
- temp = er32(XOFFTXC);
- temp = er32(FCRUC);
-
- if (hw->mac_type != e1000_ich8lan) {
- temp = er32(PRC64);
- temp = er32(PRC127);
- temp = er32(PRC255);
- temp = er32(PRC511);
- temp = er32(PRC1023);
- temp = er32(PRC1522);
- }
-
- temp = er32(GPRC);
- temp = er32(BPRC);
- temp = er32(MPRC);
- temp = er32(GPTC);
- temp = er32(GORCL);
- temp = er32(GORCH);
- temp = er32(GOTCL);
- temp = er32(GOTCH);
- temp = er32(RNBC);
- temp = er32(RUC);
- temp = er32(RFC);
- temp = er32(ROC);
- temp = er32(RJC);
- temp = er32(TORL);
- temp = er32(TORH);
- temp = er32(TOTL);
- temp = er32(TOTH);
- temp = er32(TPR);
- temp = er32(TPT);
-
- if (hw->mac_type != e1000_ich8lan) {
- temp = er32(PTC64);
- temp = er32(PTC127);
- temp = er32(PTC255);
- temp = er32(PTC511);
- temp = er32(PTC1023);
- temp = er32(PTC1522);
- }
-
- temp = er32(MPTC);
- temp = er32(BPTC);
-
- if (hw->mac_type < e1000_82543) return;
-
- temp = er32(ALGNERRC);
- temp = er32(RXERRC);
- temp = er32(TNCRS);
- temp = er32(CEXTERR);
- temp = er32(TSCTC);
- temp = er32(TSCTFC);
-
- if (hw->mac_type <= e1000_82544) return;
-
- temp = er32(MGTPRC);
- temp = er32(MGTPDC);
- temp = er32(MGTPTC);
-
- if (hw->mac_type <= e1000_82547_rev_2) return;
-
- temp = er32(IAC);
- temp = er32(ICRXOC);
-
- if (hw->mac_type == e1000_ich8lan) return;
-
- temp = er32(ICRXPTC);
- temp = er32(ICRXATC);
- temp = er32(ICTXPTC);
- temp = er32(ICTXATC);
- temp = er32(ICTXQEC);
- temp = er32(ICTXQMTC);
- temp = er32(ICRXDMTC);
-}
-
-/******************************************************************************
- * Resets Adaptive IFS to its default state.
- *
- * hw - Struct containing variables accessed by shared code
+ volatile u32 temp;
+
+ temp = er32(CRCERRS);
+ temp = er32(SYMERRS);
+ temp = er32(MPC);
+ temp = er32(SCC);
+ temp = er32(ECOL);
+ temp = er32(MCC);
+ temp = er32(LATECOL);
+ temp = er32(COLC);
+ temp = er32(DC);
+ temp = er32(SEC);
+ temp = er32(RLEC);
+ temp = er32(XONRXC);
+ temp = er32(XONTXC);
+ temp = er32(XOFFRXC);
+ temp = er32(XOFFTXC);
+ temp = er32(FCRUC);
+
+ temp = er32(PRC64);
+ temp = er32(PRC127);
+ temp = er32(PRC255);
+ temp = er32(PRC511);
+ temp = er32(PRC1023);
+ temp = er32(PRC1522);
+
+ temp = er32(GPRC);
+ temp = er32(BPRC);
+ temp = er32(MPRC);
+ temp = er32(GPTC);
+ temp = er32(GORCL);
+ temp = er32(GORCH);
+ temp = er32(GOTCL);
+ temp = er32(GOTCH);
+ temp = er32(RNBC);
+ temp = er32(RUC);
+ temp = er32(RFC);
+ temp = er32(ROC);
+ temp = er32(RJC);
+ temp = er32(TORL);
+ temp = er32(TORH);
+ temp = er32(TOTL);
+ temp = er32(TOTH);
+ temp = er32(TPR);
+ temp = er32(TPT);
+
+ temp = er32(PTC64);
+ temp = er32(PTC127);
+ temp = er32(PTC255);
+ temp = er32(PTC511);
+ temp = er32(PTC1023);
+ temp = er32(PTC1522);
+
+ temp = er32(MPTC);
+ temp = er32(BPTC);
+
+ if (hw->mac_type < e1000_82543)
+ return;
+
+ temp = er32(ALGNERRC);
+ temp = er32(RXERRC);
+ temp = er32(TNCRS);
+ temp = er32(CEXTERR);
+ temp = er32(TSCTC);
+ temp = er32(TSCTFC);
+
+ if (hw->mac_type <= e1000_82544)
+ return;
+
+ temp = er32(MGTPRC);
+ temp = er32(MGTPDC);
+ temp = er32(MGTPTC);
+}
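
The discarded reads above work because these statistics registers are clear-on-read: reading one zeroes the hardware counter even though the value is thrown away, and the volatile temporary keeps the compiler from eliding the access. The same idiom in isolation, using a register name from the list above:

	/* Illustrative: discard-read of a clear-on-read counter. */
	volatile u32 discard;

	discard = er32(MPC);	/* missed-packet count is now zero in hardware */
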
+
+/**
+ * e1000_reset_adaptive - Resets Adaptive IFS to its default state.
+ * @hw: Struct containing variables accessed by shared code
*
* Call this after e1000_init_hw. You may override the IFS defaults by setting
* hw->ifs_params_forced to true. However, you must initialize hw->
* current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
* before calling this function.
- *****************************************************************************/
+ */
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_reset_adaptive");
-
- if (hw->adaptive_ifs) {
- if (!hw->ifs_params_forced) {
- hw->current_ifs_val = 0;
- hw->ifs_min_val = IFS_MIN;
- hw->ifs_max_val = IFS_MAX;
- hw->ifs_step_size = IFS_STEP;
- hw->ifs_ratio = IFS_RATIO;
- }
- hw->in_ifs_mode = false;
- ew32(AIT, 0);
- } else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
- }
+ DEBUGFUNC("e1000_reset_adaptive");
+
+ if (hw->adaptive_ifs) {
+ if (!hw->ifs_params_forced) {
+ hw->current_ifs_val = 0;
+ hw->ifs_min_val = IFS_MIN;
+ hw->ifs_max_val = IFS_MAX;
+ hw->ifs_step_size = IFS_STEP;
+ hw->ifs_ratio = IFS_RATIO;
+ }
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
}
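
As the comment above says, a caller that wants non-default IFS parameters must seed them and set ifs_params_forced before the reset. A minimal sketch, shown with the driver's own constants so no new values are introduced:

	/* Illustrative: force the IFS parameters before resetting adaptive IFS. */
	hw->ifs_params_forced = true;
	hw->current_ifs_val = 0;
	hw->ifs_min_val = IFS_MIN;
	hw->ifs_max_val = IFS_MAX;
	hw->ifs_step_size = IFS_STEP;
	hw->ifs_ratio = IFS_RATIO;
	e1000_reset_adaptive(hw);
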
-/******************************************************************************
+/**
+ * e1000_update_adaptive - update adaptive IFS
+ * @hw: Struct containing variables accessed by shared code
+ * @tx_packets: Number of transmits since last callback
+ * @total_collisions: Number of collisions since last callback
+ *
* Called during the callback/watchdog routine to update IFS value based on
* the ratio of transmits to collisions.
- *
- * hw - Struct containing variables accessed by shared code
- * tx_packets - Number of transmits since last callback
- * total_collisions - Number of collisions since last callback
- *****************************************************************************/
+ */
void e1000_update_adaptive(struct e1000_hw *hw)
{
- DEBUGFUNC("e1000_update_adaptive");
-
- if (hw->adaptive_ifs) {
- if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
- if (hw->tx_packet_delta > MIN_NUM_XMITS) {
- hw->in_ifs_mode = true;
- if (hw->current_ifs_val < hw->ifs_max_val) {
- if (hw->current_ifs_val == 0)
- hw->current_ifs_val = hw->ifs_min_val;
- else
- hw->current_ifs_val += hw->ifs_step_size;
- ew32(AIT, hw->current_ifs_val);
- }
- }
- } else {
- if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
- hw->current_ifs_val = 0;
- hw->in_ifs_mode = false;
- ew32(AIT, 0);
- }
- }
- } else {
- DEBUGOUT("Not in Adaptive IFS mode!\n");
- }
+ DEBUGFUNC("e1000_update_adaptive");
+
+ if (hw->adaptive_ifs) {
+		if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+ if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+ hw->in_ifs_mode = true;
+ if (hw->current_ifs_val < hw->ifs_max_val) {
+ if (hw->current_ifs_val == 0)
+ hw->current_ifs_val =
+ hw->ifs_min_val;
+ else
+ hw->current_ifs_val +=
+ hw->ifs_step_size;
+ ew32(AIT, hw->current_ifs_val);
+ }
+ }
+ } else {
+ if (hw->in_ifs_mode
+ && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+ hw->current_ifs_val = 0;
+ hw->in_ifs_mode = false;
+ ew32(AIT, 0);
+ }
+ }
+ } else {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ }
}
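
The routine expects the caller (normally the watchdog) to have refreshed collision_delta and tx_packet_delta from the statistics counters since the previous run. A minimal sketch of that call site, assuming a struct e1000_hw_stats *stats mirroring the COLC and TPT counters; the *_prev snapshot variables are illustrative:

	/* Illustrative watchdog-path usage: feed per-interval deltas, then let
	 * the routine adjust the AIT register. */
	hw->tx_packet_delta = stats->tpt - tpt_prev;
	hw->collision_delta = stats->colc - colc_prev;
	tpt_prev = stats->tpt;
	colc_prev = stats->colc;
	e1000_update_adaptive(hw);
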
-/******************************************************************************
- * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+/**
+ * e1000_tbi_adjust_stats
+ * @hw: Struct containing variables accessed by shared code
+ * @frame_len: The length of the frame in question
+ * @mac_addr: The Ethernet destination address of the frame in question
*
- * hw - Struct containing variables accessed by shared code
- * frame_len - The length of the frame in question
- * mac_addr - The Ethernet destination address of the frame in question
- *****************************************************************************/
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ */
void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
u32 frame_len, u8 *mac_addr)
{
- u64 carry_bit;
-
- /* First adjust the frame length. */
- frame_len--;
- /* We need to adjust the statistics counters, since the hardware
- * counters overcount this packet as a CRC error and undercount
- * the packet as a good packet
- */
- /* This packet should not be counted as a CRC error. */
- stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
- stats->gprc++;
-
- /* Adjust the Good Octets received counters */
- carry_bit = 0x80000000 & stats->gorcl;
- stats->gorcl += frame_len;
- /* If the high bit of Gorcl (the low 32 bits of the Good Octets
- * Received Count) was one before the addition,
- * AND it is zero after, then we lost the carry out,
- * need to add one to Gorch (Good Octets Received Count High).
- * This could be simplified if all environments supported
- * 64-bit integers.
- */
- if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
- stats->gorch++;
- /* Is this a broadcast or multicast? Check broadcast first,
- * since the test for a multicast frame will test positive on
- * a broadcast frame.
- */
- if ((mac_addr[0] == (u8)0xff) && (mac_addr[1] == (u8)0xff))
- /* Broadcast packet */
- stats->bprc++;
- else if (*mac_addr & 0x01)
- /* Multicast packet */
- stats->mprc++;
-
- if (frame_len == hw->max_frame_size) {
- /* In this case, the hardware has overcounted the number of
- * oversize frames.
- */
- if (stats->roc > 0)
- stats->roc--;
- }
-
- /* Adjust the bin counters when the extra byte put the frame in the
- * wrong bin. Remember that the frame_len was adjusted above.
- */
- if (frame_len == 64) {
- stats->prc64++;
- stats->prc127--;
- } else if (frame_len == 127) {
- stats->prc127++;
- stats->prc255--;
- } else if (frame_len == 255) {
- stats->prc255++;
- stats->prc511--;
- } else if (frame_len == 511) {
- stats->prc511++;
- stats->prc1023--;
- } else if (frame_len == 1023) {
- stats->prc1023++;
- stats->prc1522--;
- } else if (frame_len == 1522) {
- stats->prc1522++;
- }
+ u64 carry_bit;
+
+ /* First adjust the frame length. */
+ frame_len--;
+ /* We need to adjust the statistics counters, since the hardware
+ * counters overcount this packet as a CRC error and undercount
+ * the packet as a good packet
+ */
+ /* This packet should not be counted as a CRC error. */
+ stats->crcerrs--;
+ /* This packet does count as a Good Packet Received. */
+ stats->gprc++;
+
+ /* Adjust the Good Octets received counters */
+ carry_bit = 0x80000000 & stats->gorcl;
+ stats->gorcl += frame_len;
+ /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+ * Received Count) was one before the addition,
+ * AND it is zero after, then we lost the carry out,
+ * need to add one to Gorch (Good Octets Received Count High).
+ * This could be simplified if all environments supported
+ * 64-bit integers.
+ */
+ if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+ stats->gorch++;
+ /* Is this a broadcast or multicast? Check broadcast first,
+ * since the test for a multicast frame will test positive on
+ * a broadcast frame.
+ */
+ if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ /* Broadcast packet */
+ stats->bprc++;
+ else if (*mac_addr & 0x01)
+ /* Multicast packet */
+ stats->mprc++;
+
+ if (frame_len == hw->max_frame_size) {
+ /* In this case, the hardware has overcounted the number of
+ * oversize frames.
+ */
+ if (stats->roc > 0)
+ stats->roc--;
+ }
+
+ /* Adjust the bin counters when the extra byte put the frame in the
+ * wrong bin. Remember that the frame_len was adjusted above.
+ */
+ if (frame_len == 64) {
+ stats->prc64++;
+ stats->prc127--;
+ } else if (frame_len == 127) {
+ stats->prc127++;
+ stats->prc255--;
+ } else if (frame_len == 255) {
+ stats->prc255++;
+ stats->prc511--;
+ } else if (frame_len == 511) {
+ stats->prc511++;
+ stats->prc1023--;
+ } else if (frame_len == 1023) {
+ stats->prc1023++;
+ stats->prc1522--;
+ } else if (frame_len == 1522) {
+ stats->prc1522++;
+ }
}
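
The gorcl/gorch handling above is a manual carry across a 64-bit counter kept as two 32-bit halves. The same trick in isolation; the helper is illustrative:

	/* Illustrative: add to the low half of a split counter and propagate
	 * the lost carry.  If bit 31 was set before the add and is clear
	 * afterwards, the addition wrapped and the high half must be bumped. */
	static void split_counter_add(u64 *low, u64 *high, u32 len)
	{
		u64 carry_bit = *low & 0x80000000;

		*low += len;
		if (carry_bit && !(*low & 0x80000000))
			(*high)++;
	}
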
-/******************************************************************************
- * Gets the current PCI bus type, speed, and width of the hardware
+/**
+ * e1000_get_bus_info
+ * @hw: Struct containing variables accessed by shared code
*
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
+ * Gets the current PCI bus type, speed, and width of the hardware
+ */
void e1000_get_bus_info(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 pci_ex_link_status;
- u32 status;
-
- switch (hw->mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
- hw->bus_type = e1000_bus_type_pci;
- hw->bus_speed = e1000_bus_speed_unknown;
- hw->bus_width = e1000_bus_width_unknown;
- break;
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_80003es2lan:
- hw->bus_type = e1000_bus_type_pci_express;
- hw->bus_speed = e1000_bus_speed_2500;
- ret_val = e1000_read_pcie_cap_reg(hw,
- PCI_EX_LINK_STATUS,
- &pci_ex_link_status);
- if (ret_val)
- hw->bus_width = e1000_bus_width_unknown;
- else
- hw->bus_width = (pci_ex_link_status & PCI_EX_LINK_WIDTH_MASK) >>
- PCI_EX_LINK_WIDTH_SHIFT;
- break;
- case e1000_ich8lan:
- hw->bus_type = e1000_bus_type_pci_express;
- hw->bus_speed = e1000_bus_speed_2500;
- hw->bus_width = e1000_bus_width_pciex_1;
- break;
- default:
- status = er32(STATUS);
- hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
- e1000_bus_type_pcix : e1000_bus_type_pci;
-
- if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
- hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
- e1000_bus_speed_66 : e1000_bus_speed_120;
- } else if (hw->bus_type == e1000_bus_type_pci) {
- hw->bus_speed = (status & E1000_STATUS_PCI66) ?
- e1000_bus_speed_66 : e1000_bus_speed_33;
- } else {
- switch (status & E1000_STATUS_PCIX_SPEED) {
- case E1000_STATUS_PCIX_SPEED_66:
- hw->bus_speed = e1000_bus_speed_66;
- break;
- case E1000_STATUS_PCIX_SPEED_100:
- hw->bus_speed = e1000_bus_speed_100;
- break;
- case E1000_STATUS_PCIX_SPEED_133:
- hw->bus_speed = e1000_bus_speed_133;
- break;
- default:
- hw->bus_speed = e1000_bus_speed_reserved;
- break;
- }
- }
- hw->bus_width = (status & E1000_STATUS_BUS64) ?
- e1000_bus_width_64 : e1000_bus_width_32;
- break;
- }
+ u32 status;
+
+ switch (hw->mac_type) {
+ case e1000_82542_rev2_0:
+ case e1000_82542_rev2_1:
+ hw->bus_type = e1000_bus_type_pci;
+ hw->bus_speed = e1000_bus_speed_unknown;
+ hw->bus_width = e1000_bus_width_unknown;
+ break;
+ default:
+ status = er32(STATUS);
+ hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+ e1000_bus_type_pcix : e1000_bus_type_pci;
+
+ if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+ hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+ e1000_bus_speed_66 : e1000_bus_speed_120;
+ } else if (hw->bus_type == e1000_bus_type_pci) {
+ hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+ e1000_bus_speed_66 : e1000_bus_speed_33;
+ } else {
+ switch (status & E1000_STATUS_PCIX_SPEED) {
+ case E1000_STATUS_PCIX_SPEED_66:
+ hw->bus_speed = e1000_bus_speed_66;
+ break;
+ case E1000_STATUS_PCIX_SPEED_100:
+ hw->bus_speed = e1000_bus_speed_100;
+ break;
+ case E1000_STATUS_PCIX_SPEED_133:
+ hw->bus_speed = e1000_bus_speed_133;
+ break;
+ default:
+ hw->bus_speed = e1000_bus_speed_reserved;
+ break;
+ }
+ }
+ hw->bus_width = (status & E1000_STATUS_BUS64) ?
+ e1000_bus_width_64 : e1000_bus_width_32;
+ break;
+ }
}
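
After the call, the decoded bus properties are available in the hw struct. A minimal usage sketch, using only enum values that appear in the code above:

	/* Illustrative: report a 64-bit PCI-X bus after probing. */
	e1000_get_bus_info(hw);
	if (hw->bus_type == e1000_bus_type_pcix &&
	    hw->bus_width == e1000_bus_width_64)
		DEBUGOUT("64-bit PCI-X bus detected\n");
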
-/******************************************************************************
+/**
+ * e1000_write_reg_io
+ * @hw: Struct containing variables accessed by shared code
+ * @offset: offset to write to
+ * @value: value to write
+ *
* Writes a value to one of the devices registers using port I/O (as opposed to
* memory mapped I/O). Only 82544 and newer devices support port I/O.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset to write to
- * value - value to write
- *****************************************************************************/
+ */
static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
{
- unsigned long io_addr = hw->io_base;
- unsigned long io_data = hw->io_base + 4;
+ unsigned long io_addr = hw->io_base;
+ unsigned long io_data = hw->io_base + 4;
- e1000_io_write(hw, io_addr, offset);
- e1000_io_write(hw, io_data, value);
+ e1000_io_write(hw, io_addr, offset);
+ e1000_io_write(hw, io_data, value);
}
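
The two e1000_io_write() calls implement the usual IOADDR/IODATA pair: the first selects the target register, the second carries the value. A minimal sketch of the raw sequence, assuming x86-style port I/O via outl(), which is roughly what e1000_io_write() reduces to:

	/* Illustrative only: the port-I/O register write, spelled out. */
	outl(offset, hw->io_base);	/* IOADDR <- register offset */
	outl(value, hw->io_base + 4);	/* IODATA <- value to write   */
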
-/******************************************************************************
- * Estimates the cable length.
- *
- * hw - Struct containing variables accessed by shared code
- * min_length - The estimated minimum length
- * max_length - The estimated maximum length
+/**
+ * e1000_get_cable_length - Estimates the cable length.
+ * @hw: Struct containing variables accessed by shared code
+ * @min_length: The estimated minimum length
+ * @max_length: The estimated maximum length
*
* returns: - E1000_ERR_XXX
* E1000_SUCCESS
@@ -6528,185 +4842,115 @@ static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value)
* So for M88 phy's, this function interprets the one value returned from the
* register to the minimum and maximum range.
* For IGP phy's, the function calculates the range by the AGC registers.
- *****************************************************************************/
+ */
static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 *max_length)
{
- s32 ret_val;
- u16 agc_value = 0;
- u16 i, phy_data;
- u16 cable_length;
-
- DEBUGFUNC("e1000_get_cable_length");
-
- *min_length = *max_length = 0;
-
- /* Use old method for Phy older than IGP */
- if (hw->phy_type == e1000_phy_m88) {
-
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
- cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-
- /* Convert the enum value to ranged values */
- switch (cable_length) {
- case e1000_cable_length_50:
- *min_length = 0;
- *max_length = e1000_igp_cable_length_50;
- break;
- case e1000_cable_length_50_80:
- *min_length = e1000_igp_cable_length_50;
- *max_length = e1000_igp_cable_length_80;
- break;
- case e1000_cable_length_80_110:
- *min_length = e1000_igp_cable_length_80;
- *max_length = e1000_igp_cable_length_110;
- break;
- case e1000_cable_length_110_140:
- *min_length = e1000_igp_cable_length_110;
- *max_length = e1000_igp_cable_length_140;
- break;
- case e1000_cable_length_140:
- *min_length = e1000_igp_cable_length_140;
- *max_length = e1000_igp_cable_length_170;
- break;
- default:
- return -E1000_ERR_PHY;
- break;
- }
- } else if (hw->phy_type == e1000_phy_gg82563) {
- ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
- &phy_data);
- if (ret_val)
- return ret_val;
- cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
-
- switch (cable_length) {
- case e1000_gg_cable_length_60:
- *min_length = 0;
- *max_length = e1000_igp_cable_length_60;
- break;
- case e1000_gg_cable_length_60_115:
- *min_length = e1000_igp_cable_length_60;
- *max_length = e1000_igp_cable_length_115;
- break;
- case e1000_gg_cable_length_115_150:
- *min_length = e1000_igp_cable_length_115;
- *max_length = e1000_igp_cable_length_150;
- break;
- case e1000_gg_cable_length_150:
- *min_length = e1000_igp_cable_length_150;
- *max_length = e1000_igp_cable_length_180;
- break;
- default:
- return -E1000_ERR_PHY;
- break;
- }
- } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
- u16 cur_agc_value;
- u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
- u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- {IGP01E1000_PHY_AGC_A,
- IGP01E1000_PHY_AGC_B,
- IGP01E1000_PHY_AGC_C,
- IGP01E1000_PHY_AGC_D};
- /* Read the AGC registers for all channels */
- for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
-
- ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
- if (ret_val)
- return ret_val;
-
- cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
-
- /* Value bound check. */
- if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
- (cur_agc_value == 0))
- return -E1000_ERR_PHY;
-
- agc_value += cur_agc_value;
-
- /* Update minimal AGC value. */
- if (min_agc_value > cur_agc_value)
- min_agc_value = cur_agc_value;
- }
-
- /* Remove the minimal AGC result for length < 50m */
- if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
- agc_value -= min_agc_value;
-
- /* Get the average length of the remaining 3 channels */
- agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
- } else {
- /* Get the average length of all the 4 channels. */
- agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
- }
-
- /* Set the range of the calculated length. */
- *min_length = ((e1000_igp_cable_length_table[agc_value] -
- IGP01E1000_AGC_RANGE) > 0) ?
- (e1000_igp_cable_length_table[agc_value] -
- IGP01E1000_AGC_RANGE) : 0;
- *max_length = e1000_igp_cable_length_table[agc_value] +
- IGP01E1000_AGC_RANGE;
- } else if (hw->phy_type == e1000_phy_igp_2 ||
- hw->phy_type == e1000_phy_igp_3) {
- u16 cur_agc_index, max_agc_index = 0;
- u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
- u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
- {IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D};
- /* Read the AGC registers for all channels */
- for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
- ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Getting bits 15:9, which represent the combination of course and
- * fine gain values. The result is a number that can be put into
- * the lookup table to obtain the approximate cable length. */
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
-
- /* Array index bound check. */
- if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
- (cur_agc_index == 0))
- return -E1000_ERR_PHY;
-
- /* Remove min & max AGC values from calculation. */
- if (e1000_igp_2_cable_length_table[min_agc_index] >
- e1000_igp_2_cable_length_table[cur_agc_index])
- min_agc_index = cur_agc_index;
- if (e1000_igp_2_cable_length_table[max_agc_index] <
- e1000_igp_2_cable_length_table[cur_agc_index])
- max_agc_index = cur_agc_index;
-
- agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
- }
-
- agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
- e1000_igp_2_cable_length_table[max_agc_index]);
- agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
-
- /* Calculate cable length with the error range of +/- 10 meters. */
- *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0;
- *max_length = agc_value + IGP02E1000_AGC_RANGE;
- }
-
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 agc_value = 0;
+ u16 i, phy_data;
+ u16 cable_length;
+
+ DEBUGFUNC("e1000_get_cable_length");
+
+ *min_length = *max_length = 0;
+
+ /* Use old method for Phy older than IGP */
+ if (hw->phy_type == e1000_phy_m88) {
+
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+ /* Convert the enum value to ranged values */
+ switch (cable_length) {
+ case e1000_cable_length_50:
+ *min_length = 0;
+ *max_length = e1000_igp_cable_length_50;
+ break;
+ case e1000_cable_length_50_80:
+ *min_length = e1000_igp_cable_length_50;
+ *max_length = e1000_igp_cable_length_80;
+ break;
+ case e1000_cable_length_80_110:
+ *min_length = e1000_igp_cable_length_80;
+ *max_length = e1000_igp_cable_length_110;
+ break;
+ case e1000_cable_length_110_140:
+ *min_length = e1000_igp_cable_length_110;
+ *max_length = e1000_igp_cable_length_140;
+ break;
+ case e1000_cable_length_140:
+ *min_length = e1000_igp_cable_length_140;
+ *max_length = e1000_igp_cable_length_170;
+ break;
+ default:
+ return -E1000_ERR_PHY;
+ break;
+ }
+ } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+ u16 cur_agc_value;
+ u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+ u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ { IGP01E1000_PHY_AGC_A,
+ IGP01E1000_PHY_AGC_B,
+ IGP01E1000_PHY_AGC_C,
+ IGP01E1000_PHY_AGC_D
+ };
+ /* Read the AGC registers for all channels */
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+ ret_val =
+ e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+ /* Value bound check. */
+ if ((cur_agc_value >=
+ IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
+ || (cur_agc_value == 0))
+ return -E1000_ERR_PHY;
+
+ agc_value += cur_agc_value;
+
+ /* Update minimal AGC value. */
+ if (min_agc_value > cur_agc_value)
+ min_agc_value = cur_agc_value;
+ }
+
+ /* Remove the minimal AGC result for length < 50m */
+ if (agc_value <
+ IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+ agc_value -= min_agc_value;
+
+ /* Get the average length of the remaining 3 channels */
+ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+ } else {
+ /* Get the average length of all the 4 channels. */
+ agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+ }
+
+ /* Set the range of the calculated length. */
+ *min_length = ((e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) > 0) ?
+ (e1000_igp_cable_length_table[agc_value] -
+ IGP01E1000_AGC_RANGE) : 0;
+ *max_length = e1000_igp_cable_length_table[agc_value] +
+ IGP01E1000_AGC_RANGE;
+ }
+
+ return E1000_SUCCESS;
}
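
For M88 PHYs the single register value is widened into a [min, max] range, while for IGP PHYs the range comes from averaging the AGC channels, but the caller sees the same two outputs either way. A minimal usage sketch, assuming the driver's two-argument debug macro DEBUGOUT2:

	/* Illustrative: consume the estimated cable-length range. */
	u16 min_len, max_len;

	if (e1000_get_cable_length(hw, &min_len, &max_len) == E1000_SUCCESS)
		DEBUGOUT2("cable length estimate: %d - %d m\n", min_len, max_len);
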
-/******************************************************************************
- * Check the cable polarity
- *
- * hw - Struct containing variables accessed by shared code
- * polarity - output parameter : 0 - Polarity is not reversed
+/**
+ * e1000_check_polarity - Check the cable polarity
+ * @hw: Struct containing variables accessed by shared code
+ * @polarity: output parameter : 0 - Polarity is not reversed
* 1 - Polarity is reversed.
*
* returns: - E1000_ERR_XXX
@@ -6717,73 +4961,65 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
* 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will
* return 0. If the link speed is 1000 Mbps the polarity status is in the
* IGP01E1000_PHY_PCS_INIT_REG.
- *****************************************************************************/
+ */
static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity *polarity)
{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_check_polarity");
-
- if ((hw->phy_type == e1000_phy_m88) ||
- (hw->phy_type == e1000_phy_gg82563)) {
- /* return the Polarity bit in the Status register. */
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
- *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
- M88E1000_PSSR_REV_POLARITY_SHIFT) ?
- e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
-
- } else if (hw->phy_type == e1000_phy_igp ||
- hw->phy_type == e1000_phy_igp_3 ||
- hw->phy_type == e1000_phy_igp_2) {
- /* Read the Status register to check the speed */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
- * find the polarity status */
- if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
- IGP01E1000_PSSR_SPEED_1000MBPS) {
-
- /* Read the GIG initialization PCS register (0x00B4) */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- /* Check the polarity bits */
- *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
- e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
- } else {
- /* For 10 Mbps, read the polarity bit in the status register. (for
- * 100 Mbps this bit is always 0) */
- *polarity = (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
- e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
- }
- } else if (hw->phy_type == e1000_phy_ife) {
- ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
- &phy_data);
- if (ret_val)
- return ret_val;
- *polarity = ((phy_data & IFE_PESC_POLARITY_REVERSED) >>
- IFE_PESC_POLARITY_REVERSED_SHIFT) ?
- e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
- }
- return E1000_SUCCESS;
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_check_polarity");
+
+ if (hw->phy_type == e1000_phy_m88) {
+ /* return the Polarity bit in the Status register. */
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+ *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >>
+ M88E1000_PSSR_REV_POLARITY_SHIFT) ?
+ e1000_rev_polarity_reversed : e1000_rev_polarity_normal;
+
+ } else if (hw->phy_type == e1000_phy_igp) {
+ /* Read the Status register to check the speed */
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+ * find the polarity status */
+ if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+ IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+ /* Read the GIG initialization PCS register (0x00B4) */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check the polarity bits */
+ *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ } else {
+ /* For 10 Mbps, read the polarity bit in the status register. (for
+ * 100 Mbps this bit is always 0) */
+ *polarity =
+ (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
+ e1000_rev_polarity_reversed :
+ e1000_rev_polarity_normal;
+ }
+ }
+ return E1000_SUCCESS;
}
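
A minimal usage sketch of the polarity check, using only the types and enum values visible in the code above:

	/* Illustrative: detect a reversed pair on the current link. */
	e1000_rev_polarity polarity;

	if (e1000_check_polarity(hw, &polarity) == E1000_SUCCESS &&
	    polarity == e1000_rev_polarity_reversed)
		DEBUGOUT("cable polarity is reversed\n");
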
-/******************************************************************************
- * Check if Downshift occured
- *
- * hw - Struct containing variables accessed by shared code
- * downshift - output parameter : 0 - No Downshift ocured.
- * 1 - Downshift ocured.
+/**
+ * e1000_check_downshift - Check if Downshift occurred
+ * @hw: Struct containing variables accessed by shared code
+ * @downshift: output parameter : 0 - No Downshift occurred.
+ * 1 - Downshift occurred.
*
* returns: - E1000_ERR_XXX
* E1000_SUCCESS
@@ -6792,2041 +5028,607 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
* Specific Status register. For IGP phy's, it reads the Downgrade bit in the
* Link Health register. In IGP this bit is latched high, so the driver must
* read it immediately after link is established.
- *****************************************************************************/
+ */
static s32 e1000_check_downshift(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 phy_data;
-
- DEBUGFUNC("e1000_check_downshift");
-
- if (hw->phy_type == e1000_phy_igp ||
- hw->phy_type == e1000_phy_igp_3 ||
- hw->phy_type == e1000_phy_igp_2) {
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
- } else if ((hw->phy_type == e1000_phy_m88) ||
- (hw->phy_type == e1000_phy_gg82563)) {
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
- M88E1000_PSSR_DOWNSHIFT_SHIFT;
- } else if (hw->phy_type == e1000_phy_ife) {
- /* e1000_phy_ife supports 10/100 speed only */
- hw->speed_downgraded = false;
- }
-
- return E1000_SUCCESS;
-}
+ s32 ret_val;
+ u16 phy_data;
-/*****************************************************************************
- *
- * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
- * gigabit link is achieved to improve link quality.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_PHY if fail to read/write the PHY
- * E1000_SUCCESS at any other case.
- *
- ****************************************************************************/
+ DEBUGFUNC("e1000_check_downshift");
-static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
-{
- s32 ret_val;
- u16 phy_data, phy_saved_data, speed, duplex, i;
- u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
- {IGP01E1000_PHY_AGC_PARAM_A,
- IGP01E1000_PHY_AGC_PARAM_B,
- IGP01E1000_PHY_AGC_PARAM_C,
- IGP01E1000_PHY_AGC_PARAM_D};
- u16 min_length, max_length;
-
- DEBUGFUNC("e1000_config_dsp_after_link_change");
-
- if (hw->phy_type != e1000_phy_igp)
- return E1000_SUCCESS;
-
- if (link_up) {
- ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
- if (ret_val) {
- DEBUGOUT("Error getting link speed and duplex\n");
- return ret_val;
- }
-
- if (speed == SPEED_1000) {
-
- ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
- if (ret_val)
- return ret_val;
-
- if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
- min_length >= e1000_igp_cable_length_50) {
-
- for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
- ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
-
- ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
- phy_data);
- if (ret_val)
- return ret_val;
- }
- hw->dsp_config_state = e1000_dsp_config_activated;
- }
-
- if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
- (min_length < e1000_igp_cable_length_50)) {
-
- u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
- u32 idle_errs = 0;
-
- /* clear previous idle error counts */
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- for (i = 0; i < ffe_idle_err_timeout; i++) {
- udelay(1000);
- ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
- if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
- hw->ffe_config_state = e1000_ffe_config_active;
-
- ret_val = e1000_write_phy_reg(hw,
- IGP01E1000_PHY_DSP_FFE,
- IGP01E1000_PHY_DSP_FFE_CM_CP);
- if (ret_val)
- return ret_val;
- break;
- }
-
- if (idle_errs)
- ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
- }
- }
- }
- } else {
- if (hw->dsp_config_state == e1000_dsp_config_activated) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
-
- if (ret_val)
- return ret_val;
-
- /* Disable the PHY transmitter */
- ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
-
- if (ret_val)
- return ret_val;
-
- mdelay(20);
-
- ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
- if (ret_val)
- return ret_val;
- for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
- ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
- phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
-
- ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
- if (ret_val)
- return ret_val;
- }
-
- ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
- if (ret_val)
- return ret_val;
-
- mdelay(20);
-
- /* Now enable the transmitter */
- ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
-
- if (ret_val)
- return ret_val;
-
- hw->dsp_config_state = e1000_dsp_config_enabled;
- }
-
- if (hw->ffe_config_state == e1000_ffe_config_active) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
-
- if (ret_val)
- return ret_val;
-
- /* Disable the PHY transmitter */
- ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
-
- if (ret_val)
- return ret_val;
-
- mdelay(20);
-
- ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
- if (ret_val)
- return ret_val;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
- IGP01E1000_PHY_DSP_FFE_DEFAULT);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
- if (ret_val)
- return ret_val;
-
- mdelay(20);
-
- /* Now enable the transmitter */
- ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
-
- if (ret_val)
- return ret_val;
-
- hw->ffe_config_state = e1000_ffe_config_enabled;
- }
- }
- return E1000_SUCCESS;
-}
+ if (hw->phy_type == e1000_phy_igp) {
+ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
-/*****************************************************************************
- * Set PHY to class A mode
- * Assumes the following operations will follow to enable the new class mode.
- * 1. Do a PHY soft reset
- * 2. Restart auto-negotiation or force link.
- *
- * hw - Struct containing variables accessed by shared code
- ****************************************************************************/
-static s32 e1000_set_phy_mode(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 eeprom_data;
-
- DEBUGFUNC("e1000_set_phy_mode");
-
- if ((hw->mac_type == e1000_82545_rev_3) &&
- (hw->media_type == e1000_media_type_copper)) {
- ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
- if (ret_val) {
- return ret_val;
- }
-
- if ((eeprom_data != EEPROM_RESERVED_WORD) &&
- (eeprom_data & EEPROM_PHY_CLASS_A)) {
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
- if (ret_val)
- return ret_val;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
- if (ret_val)
- return ret_val;
-
- hw->phy_reset_disable = false;
- }
- }
-
- return E1000_SUCCESS;
-}
+ hw->speed_downgraded =
+ (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+ } else if (hw->phy_type == e1000_phy_m88) {
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
-/*****************************************************************************
- *
- * This function sets the lplu state according to the active flag. When
- * activating lplu this function also disables smart speed and vise versa.
- * lplu will not be activated unless the device autonegotiation advertisment
- * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
- * hw: Struct containing variables accessed by shared code
- * active - true to enable lplu false to disable lplu.
- *
- * returns: - E1000_ERR_PHY if fail to read/write the PHY
- * E1000_SUCCESS at any other case.
- *
- ****************************************************************************/
+ hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+ M88E1000_PSSR_DOWNSHIFT_SHIFT;
+ }
-static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
-{
- u32 phy_ctrl = 0;
- s32 ret_val;
- u16 phy_data;
- DEBUGFUNC("e1000_set_d3_lplu_state");
-
- if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
- && hw->phy_type != e1000_phy_igp_3)
- return E1000_SUCCESS;
-
- /* During driver activity LPLU should not be used or it will attain link
- * from the lowest speeds starting from 10Mbps. The capability is used for
- * Dx transitions and states */
- if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
- if (ret_val)
- return ret_val;
- } else if (hw->mac_type == e1000_ich8lan) {
- /* MAC writes into PHY register based on the state transition
- * and start auto-negotiation. SW driver can overwrite the settings
- * in CSR PHY power control E1000_PHY_CTRL register. */
- phy_ctrl = er32(PHY_CTRL);
- } else {
- ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
- if (ret_val)
- return ret_val;
- }
-
- if (!active) {
- if (hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547_rev_2) {
- phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
- if (ret_val)
- return ret_val;
- } else {
- if (hw->mac_type == e1000_ich8lan) {
- phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
- ew32(PHY_CTRL, phy_ctrl);
- } else {
- phy_data &= ~IGP02E1000_PM_D3_LPLU;
- ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- phy_data);
- if (ret_val)
- return ret_val;
- }
- }
-
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
- if (hw->smart_speed == e1000_smart_speed_on) {
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- phy_data);
- if (ret_val)
- return ret_val;
- } else if (hw->smart_speed == e1000_smart_speed_off) {
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- phy_data);
- if (ret_val)
- return ret_val;
- }
-
- } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
- (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
- (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
-
- if (hw->mac_type == e1000_82541_rev_2 ||
- hw->mac_type == e1000_82547_rev_2) {
- phy_data |= IGP01E1000_GMII_FLEX_SPD;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
- if (ret_val)
- return ret_val;
- } else {
- if (hw->mac_type == e1000_ich8lan) {
- phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
- ew32(PHY_CTRL, phy_ctrl);
- } else {
- phy_data |= IGP02E1000_PM_D3_LPLU;
- ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- phy_data);
- if (ret_val)
- return ret_val;
- }
- }
-
- /* When LPLU is enabled we should disable SmartSpeed */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
- if (ret_val)
- return ret_val;
-
- }
- return E1000_SUCCESS;
+ return E1000_SUCCESS;
}
-/*****************************************************************************
- *
- * This function sets the lplu d0 state according to the active flag. When
- * activating lplu this function also disables smart speed and vise versa.
- * lplu will not be activated unless the device autonegotiation advertisment
- * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
- * hw: Struct containing variables accessed by shared code
- * active - true to enable lplu false to disable lplu.
+/**
+ * e1000_config_dsp_after_link_change - configure the DSP after a link change
+ * @hw: Struct containing variables accessed by shared code
+ * @link_up: was link up at the time this was called
*
 * returns: - E1000_ERR_PHY if it fails to read/write the PHY
 *           E1000_SUCCESS in any other case.
*
- ****************************************************************************/
-
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
- u32 phy_ctrl = 0;
- s32 ret_val;
- u16 phy_data;
- DEBUGFUNC("e1000_set_d0_lplu_state");
-
- if (hw->mac_type <= e1000_82547_rev_2)
- return E1000_SUCCESS;
-
- if (hw->mac_type == e1000_ich8lan) {
- phy_ctrl = er32(PHY_CTRL);
- } else {
- ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
- if (ret_val)
- return ret_val;
- }
-
- if (!active) {
- if (hw->mac_type == e1000_ich8lan) {
- phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
- ew32(PHY_CTRL, phy_ctrl);
- } else {
- phy_data &= ~IGP02E1000_PM_D0_LPLU;
- ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
- if (ret_val)
- return ret_val;
- }
-
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
- if (hw->smart_speed == e1000_smart_speed_on) {
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- phy_data);
- if (ret_val)
- return ret_val;
- } else if (hw->smart_speed == e1000_smart_speed_off) {
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- phy_data);
- if (ret_val)
- return ret_val;
- }
-
-
- } else {
-
- if (hw->mac_type == e1000_ich8lan) {
- phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
- ew32(PHY_CTRL, phy_ctrl);
- } else {
- phy_data |= IGP02E1000_PM_D0_LPLU;
- ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
- if (ret_val)
- return ret_val;
- }
-
- /* When LPLU is enabled we should disable SmartSpeed */
- ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
- if (ret_val)
- return ret_val;
-
- phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
- if (ret_val)
- return ret_val;
-
- }
- return E1000_SUCCESS;
-}
+ * 82541_rev_2 and 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved, to improve link quality.
+ */
-/******************************************************************************
- * Change VCO speed register to improve Bit Error Rate performance of SERDES.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_set_vco_speed(struct e1000_hw *hw)
+static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
{
- s32 ret_val;
- u16 default_page = 0;
- u16 phy_data;
-
- DEBUGFUNC("e1000_set_vco_speed");
+ s32 ret_val;
+ u16 phy_data, phy_saved_data, speed, duplex, i;
+ u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+ { IGP01E1000_PHY_AGC_PARAM_A,
+ IGP01E1000_PHY_AGC_PARAM_B,
+ IGP01E1000_PHY_AGC_PARAM_C,
+ IGP01E1000_PHY_AGC_PARAM_D
+ };
+ u16 min_length, max_length;
+
+ DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ if (link_up) {
+ ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val) {
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
+ }
- switch (hw->mac_type) {
- case e1000_82545_rev_3:
- case e1000_82546_rev_3:
- break;
- default:
- return E1000_SUCCESS;
- }
+ if (speed == SPEED_1000) {
+
+ ret_val =
+ e1000_get_cable_length(hw, &min_length,
+ &max_length);
+ if (ret_val)
+ return ret_val;
+
+ if ((hw->dsp_config_state == e1000_dsp_config_enabled)
+ && min_length >= e1000_igp_cable_length_50) {
+
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw,
+ dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &=
+ ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+ ret_val =
+ e1000_write_phy_reg(hw,
+ dsp_reg_array
+ [i], phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ hw->dsp_config_state =
+ e1000_dsp_config_activated;
+ }
+
+ if ((hw->ffe_config_state == e1000_ffe_config_enabled)
+ && (min_length < e1000_igp_cable_length_50)) {
+
+ u16 ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+ u32 idle_errs = 0;
+
+ /* clear previous idle error counts */
+ ret_val =
+ e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ for (i = 0; i < ffe_idle_err_timeout; i++) {
+ udelay(1000);
+ ret_val =
+ e1000_read_phy_reg(hw,
+ PHY_1000T_STATUS,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ idle_errs +=
+ (phy_data &
+ SR_1000T_IDLE_ERROR_CNT);
+ if (idle_errs >
+ SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT)
+ {
+ hw->ffe_config_state =
+ e1000_ffe_config_active;
+
+ ret_val =
+ e1000_write_phy_reg(hw,
+ IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_CM_CP);
+ if (ret_val)
+ return ret_val;
+ break;
+ }
+
+ if (idle_errs)
+ ffe_idle_err_timeout =
+ FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+ }
+ }
+ }
+ } else {
+ if (hw->dsp_config_state == e1000_dsp_config_activated) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+ ret_val =
+ e1000_read_phy_reg(hw, dsp_reg_array[i],
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+ phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+ ret_val =
+ e1000_write_phy_reg(hw, dsp_reg_array[i],
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
+
+ mdelay(20);
+
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+ if (ret_val)
+ return ret_val;
+
+ hw->dsp_config_state = e1000_dsp_config_enabled;
+ }
- /* Set PHY register 30, page 5, bit 8 to 0 */
+ if (hw->ffe_config_state == e1000_ffe_config_active) {
+ /* Save off the current value of register 0x2F5B to be restored at
+ * the end of this routine. */
+ ret_val =
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
- if (ret_val)
- return ret_val;
+ if (ret_val)
+ return ret_val;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
- if (ret_val)
- return ret_val;
+ /* Disable the PHY transmitter */
+ ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
+ if (ret_val)
+ return ret_val;
- phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
+ mdelay(20);
- /* Set PHY register 30, page 4, bit 11 to 1 */
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_FORCE_GIGA);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+ IGP01E1000_PHY_DSP_FFE_DEFAULT);
+ if (ret_val)
+ return ret_val;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
- if (ret_val)
- return ret_val;
+ ret_val = e1000_write_phy_reg(hw, 0x0000,
+ IGP01E1000_IEEE_RESTART_AUTONEG);
+ if (ret_val)
+ return ret_val;
- ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
- if (ret_val)
- return ret_val;
+ mdelay(20);
- phy_data |= M88E1000_PHY_VCO_REG_BIT11;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
- if (ret_val)
- return ret_val;
+ /* Now enable the transmitter */
+ ret_val =
+ e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
- if (ret_val)
- return ret_val;
+ if (ret_val)
+ return ret_val;
- return E1000_SUCCESS;
+ hw->ffe_config_state = e1000_ffe_config_enabled;
+ }
+ }
+ return E1000_SUCCESS;
}
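Editorial note: the FFE branch above accumulates 1000BASE-T idle errors over a bounded polling window and switches the FFE configuration once the running count crosses a threshold, with any non-zero sample extending the window. A minimal standalone sketch of just that heuristic follows; it is not the driver code. poll_idle_err_count() and sleep_1ms() are hypothetical stand-ins for the PHY_1000T_STATUS read and the udelay(1000) call, and the mask/threshold values are illustrative rather than copied from the register definitions.

#include <stdbool.h>
#include <stdint.h>

#define IDLE_ERR_COUNT_MASK	0x00FF	/* assumed width of the idle error counter field */
#define IDLE_ERR_THRESHOLD	20	/* illustrative "excessive errors" threshold */
#define IDLE_ERR_TIMEOUT_SHORT	20	/* initial polling window, in samples */
#define IDLE_ERR_TIMEOUT_LONG	100	/* extended window once any error is seen */

/* Hypothetical stand-ins so the sketch is self-contained. */
static uint16_t poll_idle_err_count(void) { return 0; }
static void sleep_1ms(void) { }

/* Returns true if the link accumulates enough idle errors that the FFE
 * configuration should be switched, false otherwise. */
static bool idle_errors_excessive(void)
{
	unsigned int timeout = IDLE_ERR_TIMEOUT_SHORT;
	unsigned int idle_errs = 0;
	unsigned int i;

	for (i = 0; i < timeout; i++) {
		sleep_1ms();
		idle_errs += poll_idle_err_count() & IDLE_ERR_COUNT_MASK;
		if (idle_errs > IDLE_ERR_THRESHOLD)
			return true;
		/* Any error at all extends the observation window. */
		if (idle_errs)
			timeout = IDLE_ERR_TIMEOUT_LONG;
	}
	return false;
}

In the driver, the "true" outcome corresponds to writing IGP01E1000_PHY_DSP_FFE_CM_CP and marking ffe_config_state as active.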
-
-/*****************************************************************************
- * This function reads the cookie from ARC ram.
+/**
+ * e1000_set_phy_mode - Set PHY to class A mode
+ * @hw: Struct containing variables accessed by shared code
*
- * returns: - E1000_SUCCESS .
- ****************************************************************************/
-static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer)
+ * Assumes the following operations will follow to enable the new class mode.
+ * 1. Do a PHY soft reset
+ * 2. Restart auto-negotiation or force link.
+ */
+static s32 e1000_set_phy_mode(struct e1000_hw *hw)
{
- u8 i;
- u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
- u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
-
- length = (length >> 2);
- offset = (offset >> 2);
-
- for (i = 0; i < length; i++) {
- *((u32 *)buffer + i) =
- E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
- }
- return E1000_SUCCESS;
-}
+ s32 ret_val;
+ u16 eeprom_data;
+ DEBUGFUNC("e1000_set_phy_mode");
-/*****************************************************************************
- * This function checks whether the HOST IF is enabled for command operaton
- * and also checks whether the previous command is completed.
- * It busy waits in case of previous command is not completed.
- *
- * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or
- * timeout
- * - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
-{
- u32 hicr;
- u8 i;
-
- /* Check that the host interface is enabled. */
- hicr = er32(HICR);
- if ((hicr & E1000_HICR_EN) == 0) {
- DEBUGOUT("E1000_HOST_EN bit disabled.\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
- /* check the previous command is completed */
- for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
- hicr = er32(HICR);
- if (!(hicr & E1000_HICR_C))
- break;
- mdelay(1);
- }
-
- if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
- DEBUGOUT("Previous command timeout failed .\n");
- return -E1000_ERR_HOST_INTERFACE_COMMAND;
- }
- return E1000_SUCCESS;
-}
+ if ((hw->mac_type == e1000_82545_rev_3) &&
+ (hw->media_type == e1000_media_type_copper)) {
+ ret_val =
+ e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
+ &eeprom_data);
+ if (ret_val) {
+ return ret_val;
+ }
-/*****************************************************************************
- * This function writes the buffer content at the offset given on the host if.
- * It also does alignment considerations to do the writes in most efficient way.
- * Also fills up the sum of the buffer in *buffer parameter.
- *
- * returns - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
- u16 offset, u8 *sum)
-{
- u8 *tmp;
- u8 *bufptr = buffer;
- u32 data = 0;
- u16 remaining, i, j, prev_bytes;
-
- /* sum = only sum of the data and it is not checksum */
-
- if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
- return -E1000_ERR_PARAM;
- }
-
- tmp = (u8 *)&data;
- prev_bytes = offset & 0x3;
- offset &= 0xFFFC;
- offset >>= 2;
-
- if (prev_bytes) {
- data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
- for (j = prev_bytes; j < sizeof(u32); j++) {
- *(tmp + j) = *bufptr++;
- *sum += *(tmp + j);
- }
- E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
- length -= j - prev_bytes;
- offset++;
- }
-
- remaining = length & 0x3;
- length -= remaining;
-
- /* Calculate length in DWORDs */
- length >>= 2;
-
- /* The device driver writes the relevant command block into the
- * ram area. */
- for (i = 0; i < length; i++) {
- for (j = 0; j < sizeof(u32); j++) {
- *(tmp + j) = *bufptr++;
- *sum += *(tmp + j);
- }
-
- E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
- }
- if (remaining) {
- for (j = 0; j < sizeof(u32); j++) {
- if (j < remaining)
- *(tmp + j) = *bufptr++;
- else
- *(tmp + j) = 0;
-
- *sum += *(tmp + j);
- }
- E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
- }
-
- return E1000_SUCCESS;
-}
+ if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+ (eeprom_data & EEPROM_PHY_CLASS_A)) {
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+ 0x000B);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL,
+ 0x8104);
+ if (ret_val)
+ return ret_val;
+
+ hw->phy_reset_disable = false;
+ }
+ }
+ return E1000_SUCCESS;
+}
-/*****************************************************************************
- * This function writes the command header after does the checksum calculation.
+/**
+ * e1000_set_d3_lplu_state - set the D3 LPLU (low power link up) state
+ * @hw: Struct containing variables accessed by shared code
+ * @active: true to enable lplu false to disable lplu.
+ *
+ * This function sets the lplu state according to the active flag. When
+ * activating lplu this function also disables smart speed and vice versa.
+ * lplu will not be activated unless the device autonegotiation advertisement
+ * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
*
- * returns - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
- struct e1000_host_mng_command_header *hdr)
+ * returns: - E1000_ERR_PHY if it fails to read/write the PHY
+ *           E1000_SUCCESS in any other case.
+ */
+static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
- u16 i;
- u8 sum;
- u8 *buffer;
-
- /* Write the whole command header structure which includes sum of
- * the buffer */
-
- u16 length = sizeof(struct e1000_host_mng_command_header);
+ s32 ret_val;
+ u16 phy_data;
+ DEBUGFUNC("e1000_set_d3_lplu_state");
+
+ if (hw->phy_type != e1000_phy_igp)
+ return E1000_SUCCESS;
+
+ /* During driver activity LPLU should not be used or it will attain link
+ * starting from the lowest speed (10Mbps). The capability is intended for
+ * Dx transitions and states */
+ if (hw->mac_type == e1000_82541_rev_2
+ || hw->mac_type == e1000_82547_rev_2) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+ if (ret_val)
+ return ret_val;
+ }
- sum = hdr->checksum;
- hdr->checksum = 0;
+ if (!active) {
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
- buffer = (u8 *)hdr;
- i = length;
- while (i--)
- sum += buffer[i];
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
+ * Dx states where the power conservation is most important. During
+ * driver activity we should enable SmartSpeed, so performance is
+ * maintained. */
+ if (hw->smart_speed == e1000_smart_speed_on) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ } else if (hw->smart_speed == e1000_smart_speed_off) {
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
+ } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
+ || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
+ || (hw->autoneg_advertised ==
+ AUTONEG_ADVERTISE_10_100_ALL)) {
+
+ if (hw->mac_type == e1000_82541_rev_2 ||
+ hw->mac_type == e1000_82547_rev_2) {
+ phy_data |= IGP01E1000_GMII_FLEX_SPD;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+ phy_data);
+ if (ret_val)
+ return ret_val;
+ }
- hdr->checksum = 0 - sum;
+ /* When LPLU is enabled we should disable SmartSpeed */
+ ret_val =
+ e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ &phy_data);
+ if (ret_val)
+ return ret_val;
- length >>= 2;
- /* The device driver writes the relevant command block into the ram area. */
- for (i = 0; i < length; i++) {
- E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *)hdr + i));
- E1000_WRITE_FLUSH();
- }
+ phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+ ret_val =
+ e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ phy_data);
+ if (ret_val)
+ return ret_val;
- return E1000_SUCCESS;
+ }
+ return E1000_SUCCESS;
}
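Editorial note: stripped of the register accesses, the D3 LPLU handling above reduces to a small policy: when LPLU is being disabled, SmartSpeed follows the user preference; when LPLU is being enabled (only if the advertisement is the default set or restricted to the low speeds), SmartSpeed is forced off. The sketch below restates just that policy as a pure function; the enum, struct, and function names are invented for illustration and are not part of the driver.

#include <stdbool.h>

enum smart_speed { SMART_SPEED_DEFAULT, SMART_SPEED_ON, SMART_SPEED_OFF };

struct lplu_decision {
	bool set_lplu;		/* whether the low-power link-up bit gets set */
	int smart_speed;	/* -1 = leave unchanged, 0 = disable, 1 = enable */
};

/* 'active' requests LPLU, 'advertises_low_speeds' means the autoneg
 * advertisement is the default set or limited to 10 (or 10/100),
 * 'setting' is the user's SmartSpeed preference. */
static struct lplu_decision d3_lplu_policy(bool active,
					   bool advertises_low_speeds,
					   enum smart_speed setting)
{
	struct lplu_decision d = { .set_lplu = false, .smart_speed = -1 };

	if (!active) {
		/* LPLU off: SmartSpeed follows the user setting. */
		if (setting == SMART_SPEED_ON)
			d.smart_speed = 1;
		else if (setting == SMART_SPEED_OFF)
			d.smart_speed = 0;
	} else if (advertises_low_speeds) {
		/* LPLU on: SmartSpeed must be disabled. */
		d.set_lplu = true;
		d.smart_speed = 0;
	}
	return d;
}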
-
-/*****************************************************************************
- * This function indicates to ARC that a new command is pending which completes
- * one write operation by the driver.
+/**
+ * e1000_set_vco_speed - change the VCO speed
+ * @hw: Struct containing variables accessed by shared code
*
- * returns - E1000_SUCCESS for success.
- ****************************************************************************/
-static s32 e1000_mng_write_commit(struct e1000_hw *hw)
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ */
+static s32 e1000_set_vco_speed(struct e1000_hw *hw)
{
- u32 hicr;
+ s32 ret_val;
+ u16 default_page = 0;
+ u16 phy_data;
- hicr = er32(HICR);
- /* Setting this bit tells the ARC that a new command is pending. */
- ew32(HICR, hicr | E1000_HICR_C);
+ DEBUGFUNC("e1000_set_vco_speed");
- return E1000_SUCCESS;
-}
+ switch (hw->mac_type) {
+ case e1000_82545_rev_3:
+ case e1000_82546_rev_3:
+ break;
+ default:
+ return E1000_SUCCESS;
+ }
+ /* Set PHY register 30, page 5, bit 8 to 0 */
-/*****************************************************************************
- * This function checks the mode of the firmware.
- *
- * returns - true when the mode is IAMT or false.
- ****************************************************************************/
-bool e1000_check_mng_mode(struct e1000_hw *hw)
-{
- u32 fwsm;
-
- fwsm = er32(FWSM);
+ ret_val =
+ e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+ if (ret_val)
+ return ret_val;
- if (hw->mac_type == e1000_ich8lan) {
- if ((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
- return true;
- } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
- return true;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+ if (ret_val)
+ return ret_val;
- return false;
-}
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
-/*****************************************************************************
- * This function writes the dhcp info .
- ****************************************************************************/
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
-{
- s32 ret_val;
- struct e1000_host_mng_command_header hdr;
-
- hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
- hdr.command_length = length;
- hdr.reserved1 = 0;
- hdr.reserved2 = 0;
- hdr.checksum = 0;
-
- ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val == E1000_SUCCESS) {
- ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
- &(hdr.checksum));
- if (ret_val == E1000_SUCCESS) {
- ret_val = e1000_mng_write_cmd_header(hw, &hdr);
- if (ret_val == E1000_SUCCESS)
- ret_val = e1000_mng_write_commit(hw);
- }
- }
- return ret_val;
-}
+ /* Set PHY register 30, page 4, bit 11 to 1 */
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+ if (ret_val)
+ return ret_val;
-/*****************************************************************************
- * This function calculates the checksum.
- *
- * returns - checksum of buffer contents.
- ****************************************************************************/
-static u8 e1000_calculate_mng_checksum(char *buffer, u32 length)
-{
- u8 sum = 0;
- u32 i;
+ ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+ if (ret_val)
+ return ret_val;
- if (!buffer)
- return 0;
+ phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+ if (ret_val)
+ return ret_val;
- for (i=0; i < length; i++)
- sum += buffer[i];
+ ret_val =
+ e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+ if (ret_val)
+ return ret_val;
- return (u8)(0 - sum);
+ return E1000_SUCCESS;
}
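Editorial note: e1000_set_vco_speed above is an instance of the usual M88 paged-register sequence: save the currently selected page, switch pages, read-modify-write register 30, and restore the original page on the way out. The sketch below shows that sequence in isolation; the register numbers (29 for page select, 30 for general control) and the in-memory phy_read()/phy_write() stand-ins are assumptions for illustration, not the driver's accessors.

#include <stdint.h>

#define PHY_PAGE_SELECT		29	/* assumed: M88E1000_PHY_PAGE_SELECT */
#define PHY_GEN_CONTROL		30	/* assumed: M88E1000_PHY_GEN_CONTROL */

/* In-memory stand-ins for MDIO accesses so the sketch compiles on its
 * own; a real driver would issue MDIO transactions and check errors. */
static uint16_t fake_regs[32];

static int phy_read(uint8_t reg, uint16_t *val)
{
	*val = fake_regs[reg & 31];
	return 0;
}

static int phy_write(uint8_t reg, uint16_t val)
{
	fake_regs[reg & 31] = val;
	return 0;
}

/* Read-modify-write the general control register on a given page while
 * preserving whichever page the caller had selected. */
static int phy_paged_rmw(uint16_t page, uint16_t clear_bits, uint16_t set_bits)
{
	uint16_t saved_page, val;
	int ret;

	ret = phy_read(PHY_PAGE_SELECT, &saved_page);
	if (ret)
		return ret;
	ret = phy_write(PHY_PAGE_SELECT, page);
	if (ret)
		return ret;
	ret = phy_read(PHY_GEN_CONTROL, &val);
	if (ret)
		return ret;
	val = (val & ~clear_bits) | set_bits;
	ret = phy_write(PHY_GEN_CONTROL, val);
	if (ret)
		return ret;
	/* Restore the original page selection. */
	return phy_write(PHY_PAGE_SELECT, saved_page);
}

With such a helper, e1000_set_vco_speed amounts to two operations: clear bit 8 of register 30 on page 5, then set bit 11 of register 30 on page 4 (the function itself saves and restores the page once around both operations).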
-/*****************************************************************************
- * This function checks whether tx pkt filtering needs to be enabled or not.
- *
- * returns - true for packet filtering or false.
- ****************************************************************************/
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
-{
- /* called in init as well as watchdog timer functions */
-
- s32 ret_val, checksum;
- bool tx_filter = false;
- struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
- u8 *buffer = (u8 *) &(hw->mng_cookie);
-
- if (e1000_check_mng_mode(hw)) {
- ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val == E1000_SUCCESS) {
- ret_val = e1000_host_if_read_cookie(hw, buffer);
- if (ret_val == E1000_SUCCESS) {
- checksum = hdr->checksum;
- hdr->checksum = 0;
- if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
- checksum == e1000_calculate_mng_checksum((char *)buffer,
- E1000_MNG_DHCP_COOKIE_LENGTH)) {
- if (hdr->status &
- E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
- tx_filter = true;
- } else
- tx_filter = true;
- } else
- tx_filter = true;
- }
- }
-
- hw->tx_pkt_filtering = tx_filter;
- return tx_filter;
-}
-/******************************************************************************
- * Verifies the hardware needs to allow ARPs to be processed by the host
- *
- * hw - Struct containing variables accessed by shared code
+/**
+ * e1000_enable_mng_pass_thru - check for BMC pass-through
+ * @hw: Struct containing variables accessed by shared code
*
+ * Verifies whether the hardware needs to allow ARPs to be processed by the host.
* returns: - true/false
- *
- *****************************************************************************/
+ */
u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw)
{
- u32 manc;
- u32 fwsm, factps;
-
- if (hw->asf_firmware_present) {
- manc = er32(MANC);
-
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
- return false;
- if (e1000_arc_subsystem_valid(hw)) {
- fwsm = er32(FWSM);
- factps = er32(FACTPS);
-
- if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
- e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
- return true;
- } else
- if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
- return true;
- }
- return false;
-}
+ u32 manc;
-static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
-{
- s32 ret_val;
- u16 mii_status_reg;
- u16 i;
-
- /* Polarity reversal workaround for forced 10F/10H links. */
-
- /* Disable the transmitter on the PHY */
-
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
- if (ret_val)
- return ret_val;
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
- if (ret_val)
- return ret_val;
-
- /* This loop will early-out if the NO link condition has been met. */
- for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Link Status bit
- * to be clear.
- */
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
- mdelay(100);
- }
-
- /* Recommended delay time after link has been lost */
- mdelay(1000);
-
- /* Now we will re-enable th transmitter on the PHY */
-
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
- if (ret_val)
- return ret_val;
- mdelay(50);
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
- if (ret_val)
- return ret_val;
- mdelay(50);
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
- if (ret_val)
- return ret_val;
- mdelay(50);
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
- if (ret_val)
- return ret_val;
-
- /* This loop will early-out if the link condition has been met. */
- for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Link Status bit
- * to be set.
- */
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
- if (ret_val)
- return ret_val;
-
- if (mii_status_reg & MII_SR_LINK_STATUS) break;
- mdelay(100);
- }
- return E1000_SUCCESS;
+ if (hw->asf_firmware_present) {
+ manc = er32(MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+ !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ return false;
+ if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+ return true;
+ }
+ return false;
}
-/***************************************************************************
- *
- * Disables PCI-Express master access.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - none.
- *
- ***************************************************************************/
-static void e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
{
- u32 ctrl;
+ s32 ret_val;
+ u16 mii_status_reg;
+ u16 i;
- DEBUGFUNC("e1000_set_pci_express_master_disable");
+ /* Polarity reversal workaround for forced 10F/10H links. */
- if (hw->bus_type != e1000_bus_type_pci_express)
- return;
+ /* Disable the transmitter on the PHY */
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
- ew32(CTRL, ctrl);
-}
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+ if (ret_val)
+ return ret_val;
-/*******************************************************************************
- *
- * Disables PCI-Express master access and verifies there are no pending requests
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't
- * caused the master requests to be disabled.
- * E1000_SUCCESS master requests disabled.
- *
- ******************************************************************************/
-s32 e1000_disable_pciex_master(struct e1000_hw *hw)
-{
- s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
- DEBUGFUNC("e1000_disable_pciex_master");
+ /* This loop will early-out if the no-link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be clear.
+ */
- if (hw->bus_type != e1000_bus_type_pci_express)
- return E1000_SUCCESS;
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
- e1000_set_pci_express_master_disable(hw);
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
- while (timeout) {
- if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
- break;
- else
- udelay(100);
- timeout--;
- }
-
- if (!timeout) {
- DEBUGOUT("Master requests are pending.\n");
- return -E1000_ERR_MASTER_REQUESTS_PENDING;
- }
+ if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+ break;
+ mdelay(100);
+ }
- return E1000_SUCCESS;
+ /* Recommended delay time after link has been lost */
+ mdelay(1000);
+
+ /* Now we will re-enable the transmitter on the PHY */
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+ if (ret_val)
+ return ret_val;
+ mdelay(50);
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+ if (ret_val)
+ return ret_val;
+
+ /* This loop will early-out if the link condition has been met. */
+ for (i = PHY_FORCE_TIME; i > 0; i--) {
+ /* Read the MII Status Register and wait for Link Status bit
+ * to be set.
+ */
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+ if (ret_val)
+ return ret_val;
+
+ if (mii_status_reg & MII_SR_LINK_STATUS)
+ break;
+ mdelay(100);
+ }
+ return E1000_SUCCESS;
}
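Editorial note: both polling loops in the workaround above read PHY_STATUS twice per iteration because the MII link-status bit is latched low: the first read clears the latched value and the second reflects the current state. The sketch below shows that poll pattern on its own; mii_read_status() is a hypothetical stand-in for the PHY_STATUS read.

#include <stdbool.h>
#include <stdint.h>

#define MII_SR_LINK_STATUS	0x0004	/* link-status bit in the MII status register */

/* Hypothetical stand-in for reading the MII status register. */
static uint16_t mii_read_status(void)
{
	return 0;
}

/* Poll the latched link-status bit until link is reported up or the
 * iteration budget runs out; returns true if link came up. */
static bool wait_for_link_up(unsigned int iterations)
{
	while (iterations--) {
		/* The first read clears the latched value ... */
		(void)mii_read_status();
		/* ... the second read reflects the current link state. */
		if (mii_read_status() & MII_SR_LINK_STATUS)
			return true;
		/* the driver waits ~100 ms between samples here */
	}
	return false;
}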
-/*******************************************************************************
+/**
+ * e1000_get_auto_rd_done - check for auto read completion
+ * @hw: Struct containing variables accessed by shared code
*
* Check for EEPROM Auto Read bit done.
- *
- * hw: Struct containing variables accessed by shared code
- *
 * returns: - E1000_ERR_RESET if it fails to reset the MAC
 *           E1000_SUCCESS in any other case.
- *
- ******************************************************************************/
+ */
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- s32 timeout = AUTO_READ_DONE_TIMEOUT;
-
- DEBUGFUNC("e1000_get_auto_rd_done");
-
- switch (hw->mac_type) {
- default:
- msleep(5);
- break;
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- while (timeout) {
- if (er32(EECD) & E1000_EECD_AUTO_RD)
- break;
- else msleep(1);
- timeout--;
- }
-
- if (!timeout) {
- DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
- return -E1000_ERR_RESET;
- }
- break;
- }
-
- /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
- * Need to wait for PHY configuration completion before accessing NVM
- * and PHY. */
- if (hw->mac_type == e1000_82573)
- msleep(25);
-
- return E1000_SUCCESS;
+ DEBUGFUNC("e1000_get_auto_rd_done");
+ msleep(5);
+ return E1000_SUCCESS;
}
-/***************************************************************************
- * Checks if the PHY configuration is done
- *
- * hw: Struct containing variables accessed by shared code
+/**
+ * e1000_get_phy_cfg_done - check if the PHY configuration is done
+ * @hw: Struct containing variables accessed by shared code
*
+ * Checks if the PHY configuration is done
 * returns: - E1000_ERR_RESET if it fails to reset the MAC
 *           E1000_SUCCESS in any other case.
- *
- ***************************************************************************/
+ */
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- s32 timeout = PHY_CFG_TIMEOUT;
- u32 cfg_mask = E1000_EEPROM_CFG_DONE;
-
- DEBUGFUNC("e1000_get_phy_cfg_done");
-
- switch (hw->mac_type) {
- default:
- mdelay(10);
- break;
- case e1000_80003es2lan:
- /* Separate *_CFG_DONE_* bit for each port */
- if (er32(STATUS) & E1000_STATUS_FUNC_1)
- cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
- /* Fall Through */
- case e1000_82571:
- case e1000_82572:
- while (timeout) {
- if (er32(EEMNGCTL) & cfg_mask)
- break;
- else
- msleep(1);
- timeout--;
- }
- if (!timeout) {
- DEBUGOUT("MNG configuration cycle has not completed.\n");
- return -E1000_ERR_RESET;
- }
- break;
- }
-
- return E1000_SUCCESS;
-}
-
-/***************************************************************************
- *
- * Using the combination of SMBI and SWESMBI semaphore bits when resetting
- * adapter or Eeprom access.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_EEPROM if fail to access EEPROM.
- * E1000_SUCCESS at any other case.
- *
- ***************************************************************************/
-static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
-{
- s32 timeout;
- u32 swsm;
-
- DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
-
- if (!hw->eeprom_semaphore_present)
- return E1000_SUCCESS;
-
- if (hw->mac_type == e1000_80003es2lan) {
- /* Get the SW semaphore. */
- if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
- return -E1000_ERR_EEPROM;
- }
-
- /* Get the FW semaphore. */
- timeout = hw->eeprom.word_size + 1;
- while (timeout) {
- swsm = er32(SWSM);
- swsm |= E1000_SWSM_SWESMBI;
- ew32(SWSM, swsm);
- /* if we managed to set the bit we got the semaphore. */
- swsm = er32(SWSM);
- if (swsm & E1000_SWSM_SWESMBI)
- break;
-
- udelay(50);
- timeout--;
- }
-
- if (!timeout) {
- /* Release semaphores */
- e1000_put_hw_eeprom_semaphore(hw);
- DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
- return -E1000_ERR_EEPROM;
- }
-
- return E1000_SUCCESS;
-}
-
-/***************************************************************************
- * This function clears HW semaphore bits.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - None.
- *
- ***************************************************************************/
-static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
-{
- u32 swsm;
-
- DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
-
- if (!hw->eeprom_semaphore_present)
- return;
-
- swsm = er32(SWSM);
- if (hw->mac_type == e1000_80003es2lan) {
- /* Release both semaphores. */
- swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
- } else
- swsm &= ~(E1000_SWSM_SWESMBI);
- ew32(SWSM, swsm);
-}
-
-/***************************************************************************
- *
- * Obtaining software semaphore bit (SMBI) before resetting PHY.
- *
- * hw: Struct containing variables accessed by shared code
- *
- * returns: - E1000_ERR_RESET if fail to obtain semaphore.
- * E1000_SUCCESS at any other case.
- *
- ***************************************************************************/
-static s32 e1000_get_software_semaphore(struct e1000_hw *hw)
-{
- s32 timeout = hw->eeprom.word_size + 1;
- u32 swsm;
-
- DEBUGFUNC("e1000_get_software_semaphore");
-
- if (hw->mac_type != e1000_80003es2lan) {
- return E1000_SUCCESS;
- }
-
- while (timeout) {
- swsm = er32(SWSM);
- /* If SMBI bit cleared, it is now set and we hold the semaphore */
- if (!(swsm & E1000_SWSM_SMBI))
- break;
- mdelay(1);
- timeout--;
- }
-
- if (!timeout) {
- DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
- return -E1000_ERR_RESET;
- }
-
- return E1000_SUCCESS;
-}
-
-/***************************************************************************
- *
- * Release semaphore bit (SMBI).
- *
- * hw: Struct containing variables accessed by shared code
- *
- ***************************************************************************/
-static void e1000_release_software_semaphore(struct e1000_hw *hw)
-{
- u32 swsm;
-
- DEBUGFUNC("e1000_release_software_semaphore");
-
- if (hw->mac_type != e1000_80003es2lan) {
- return;
- }
-
- swsm = er32(SWSM);
- /* Release the SW semaphores.*/
- swsm &= ~E1000_SWSM_SMBI;
- ew32(SWSM, swsm);
-}
-
-/******************************************************************************
- * Checks if PHY reset is blocked due to SOL/IDER session, for example.
- * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to
- * the caller to figure out how to deal with it.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * returns: - E1000_BLK_PHY_RESET
- * E1000_SUCCESS
- *
- *****************************************************************************/
-s32 e1000_check_phy_reset_block(struct e1000_hw *hw)
-{
- u32 manc = 0;
- u32 fwsm = 0;
-
- if (hw->mac_type == e1000_ich8lan) {
- fwsm = er32(FWSM);
- return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
- : E1000_BLK_PHY_RESET;
- }
-
- if (hw->mac_type > e1000_82547_rev_2)
- manc = er32(MANC);
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
- E1000_BLK_PHY_RESET : E1000_SUCCESS;
-}
-
-static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw)
-{
- u32 fwsm;
-
- /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
- * may not be provided a DMA clock when no manageability features are
- * enabled. We do not want to perform any reads/writes to these registers
- * if this is the case. We read FWSM to determine the manageability mode.
- */
- switch (hw->mac_type) {
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_80003es2lan:
- fwsm = er32(FWSM);
- if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
- return true;
- break;
- case e1000_ich8lan:
- return true;
- default:
- break;
- }
- return false;
-}
-
-
-/******************************************************************************
- * Configure PCI-Ex no-snoop
- *
- * hw - Struct containing variables accessed by shared code.
- * no_snoop - Bitmap of no-snoop events.
- *
- * returns: E1000_SUCCESS
- *
- *****************************************************************************/
-static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
-{
- u32 gcr_reg = 0;
-
- DEBUGFUNC("e1000_set_pci_ex_no_snoop");
-
- if (hw->bus_type == e1000_bus_type_unknown)
- e1000_get_bus_info(hw);
-
- if (hw->bus_type != e1000_bus_type_pci_express)
- return E1000_SUCCESS;
-
- if (no_snoop) {
- gcr_reg = er32(GCR);
- gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
- gcr_reg |= no_snoop;
- ew32(GCR, gcr_reg);
- }
- if (hw->mac_type == e1000_ich8lan) {
- u32 ctrl_ext;
-
- ew32(GCR, PCI_EX_82566_SNOOP_ALL);
-
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
- ew32(CTRL_EXT, ctrl_ext);
- }
-
- return E1000_SUCCESS;
-}
-
-/***************************************************************************
- *
- * Get software semaphore FLAG bit (SWFLAG).
- * SWFLAG is used to synchronize the access to all shared resource between
- * SW, FW and HW.
- *
- * hw: Struct containing variables accessed by shared code
- *
- ***************************************************************************/
-static s32 e1000_get_software_flag(struct e1000_hw *hw)
-{
- s32 timeout = PHY_CFG_TIMEOUT;
- u32 extcnf_ctrl;
-
- DEBUGFUNC("e1000_get_software_flag");
-
- if (hw->mac_type == e1000_ich8lan) {
- while (timeout) {
- extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
-
- extcnf_ctrl = er32(EXTCNF_CTRL);
- if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
- break;
- mdelay(1);
- timeout--;
- }
-
- if (!timeout) {
- DEBUGOUT("FW or HW locks the resource too long.\n");
- return -E1000_ERR_CONFIG;
- }
- }
-
- return E1000_SUCCESS;
-}
-
-/***************************************************************************
- *
- * Release software semaphore FLAG bit (SWFLAG).
- * SWFLAG is used to synchronize the access to all shared resource between
- * SW, FW and HW.
- *
- * hw: Struct containing variables accessed by shared code
- *
- ***************************************************************************/
-static void e1000_release_software_flag(struct e1000_hw *hw)
-{
- u32 extcnf_ctrl;
-
- DEBUGFUNC("e1000_release_software_flag");
-
- if (hw->mac_type == e1000_ich8lan) {
- extcnf_ctrl= er32(EXTCNF_CTRL);
- extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
- ew32(EXTCNF_CTRL, extcnf_ctrl);
- }
-
- return;
-}
-
-/******************************************************************************
- * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
- * register.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to read
- * data - word read from the EEPROM
- * words - number of words to read
- *****************************************************************************/
-static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- s32 error = E1000_SUCCESS;
- u32 flash_bank = 0;
- u32 act_offset = 0;
- u32 bank_offset = 0;
- u16 word = 0;
- u16 i = 0;
-
- /* We need to know which is the valid flash bank. In the event
- * that we didn't allocate eeprom_shadow_ram, we may not be
- * managing flash_bank. So it cannot be trusted and needs
- * to be updated with each read.
- */
- /* Value of bit 22 corresponds to the flash bank we're on. */
- flash_bank = (er32(EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
-
- /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
- bank_offset = flash_bank * (hw->flash_bank_size * 2);
-
- error = e1000_get_software_flag(hw);
- if (error != E1000_SUCCESS)
- return error;
-
- for (i = 0; i < words; i++) {
- if (hw->eeprom_shadow_ram != NULL &&
- hw->eeprom_shadow_ram[offset+i].modified) {
- data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
- } else {
- /* The NVM part needs a byte offset, hence * 2 */
- act_offset = bank_offset + ((offset + i) * 2);
- error = e1000_read_ich8_word(hw, act_offset, &word);
- if (error != E1000_SUCCESS)
- break;
- data[i] = word;
- }
- }
-
- e1000_release_software_flag(hw);
-
- return error;
-}
-
-/******************************************************************************
- * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
- * register. Actually, writes are written to the shadow ram cache in the hw
- * structure hw->e1000_shadow_ram. e1000_commit_shadow_ram flushes this to
- * the NVM, which occurs when the NVM checksum is updated.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of word in the EEPROM to write
- * words - number of words to write
- * data - words to write to the EEPROM
- *****************************************************************************/
-static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- u32 i = 0;
- s32 error = E1000_SUCCESS;
-
- error = e1000_get_software_flag(hw);
- if (error != E1000_SUCCESS)
- return error;
-
- /* A driver can write to the NVM only if it has eeprom_shadow_ram
- * allocated. Subsequent reads to the modified words are read from
- * this cached structure as well. Writes will only go into this
- * cached structure unless it's followed by a call to
- * e1000_update_eeprom_checksum() where it will commit the changes
- * and clear the "modified" field.
- */
- if (hw->eeprom_shadow_ram != NULL) {
- for (i = 0; i < words; i++) {
- if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
- hw->eeprom_shadow_ram[offset+i].modified = true;
- hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
- } else {
- error = -E1000_ERR_EEPROM;
- break;
- }
- }
- } else {
- /* Drivers have the option to not allocate eeprom_shadow_ram as long
- * as they don't perform any NVM writes. An attempt in doing so
- * will result in this error.
- */
- error = -E1000_ERR_EEPROM;
- }
-
- e1000_release_software_flag(hw);
-
- return error;
-}
-
-/******************************************************************************
- * This function does initial flash setup so that a new read/write/erase cycle
- * can be started.
- *
- * hw - The pointer to the hw structure
- ****************************************************************************/
-static s32 e1000_ich8_cycle_init(struct e1000_hw *hw)
-{
- union ich8_hws_flash_status hsfsts;
- s32 error = E1000_ERR_EEPROM;
- s32 i = 0;
-
- DEBUGFUNC("e1000_ich8_cycle_init");
-
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-
- /* May be check the Flash Des Valid bit in Hw status */
- if (hsfsts.hsf_status.fldesvalid == 0) {
- DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.");
- return error;
- }
-
- /* Clear FCERR in Hw status by writing 1 */
- /* Clear DAEL in Hw status by writing a 1 */
- hsfsts.hsf_status.flcerr = 1;
- hsfsts.hsf_status.dael = 1;
-
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
-
- /* Either we should have a hardware SPI cycle in progress bit to check
- * against, in order to start a new cycle or FDONE bit should be changed
- * in the hardware so that it is 1 after harware reset, which can then be
- * used as an indication whether a cycle is in progress or has been
- * completed .. we should also have some software semaphore mechanism to
- * guard FDONE or the cycle in progress bit so that two threads access to
- * those bits can be sequentiallized or a way so that 2 threads dont
- * start the cycle at the same time */
-
- if (hsfsts.hsf_status.flcinprog == 0) {
- /* There is no cycle running at present, so we can start a cycle */
- /* Begin by setting Flash Cycle Done. */
- hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
- error = E1000_SUCCESS;
- } else {
- /* otherwise poll for sometime so the current cycle has a chance
- * to end before giving up. */
- for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcinprog == 0) {
- error = E1000_SUCCESS;
- break;
- }
- udelay(1);
- }
- if (error == E1000_SUCCESS) {
- /* Successful in waiting for previous cycle to timeout,
- * now set the Flash Cycle Done. */
- hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
- } else {
- DEBUGOUT("Flash controller busy, cannot get access");
- }
- }
- return error;
-}
-
-/******************************************************************************
- * This function starts a flash cycle and waits for its completion
- *
- * hw - The pointer to the hw structure
- ****************************************************************************/
-static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
-{
- union ich8_hws_flash_ctrl hsflctl;
- union ich8_hws_flash_status hsfsts;
- s32 error = E1000_ERR_EEPROM;
- u32 i = 0;
-
- /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
- hsflctl.hsf_ctrl.flcgo = 1;
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
- /* wait till FDONE bit is set to 1 */
- do {
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcdone == 1)
- break;
- udelay(1);
- i++;
- } while (i < timeout);
- if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
- error = E1000_SUCCESS;
- }
- return error;
-}
-
-/******************************************************************************
- * Reads a byte or word from the NVM using the ICH8 flash access registers.
- *
- * hw - The pointer to the hw structure
- * index - The index of the byte or word to read.
- * size - Size of data to read, 1=byte 2=word
- * data - Pointer to the word to store the value read.
- *****************************************************************************/
-static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
- u16 *data)
-{
- union ich8_hws_flash_status hsfsts;
- union ich8_hws_flash_ctrl hsflctl;
- u32 flash_linear_address;
- u32 flash_data = 0;
- s32 error = -E1000_ERR_EEPROM;
- s32 count = 0;
-
- DEBUGFUNC("e1000_read_ich8_data");
-
- if (size < 1 || size > 2 || data == NULL ||
- index > ICH_FLASH_LINEAR_ADDR_MASK)
- return error;
-
- flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
- hw->flash_base_addr;
-
- do {
- udelay(1);
- /* Steps */
- error = e1000_ich8_cycle_init(hw);
- if (error != E1000_SUCCESS)
- break;
-
- hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
- /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
- hsflctl.hsf_ctrl.fldbcount = size - 1;
- hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
- /* Write the last 24 bits of index into Flash Linear address field in
- * Flash Address */
- /* TODO: TBD maybe check the index against the size of flash */
-
- E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
-
- error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
-
- /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
- * sequence a few more times, else read in (shift in) the Flash Data0,
- * the order is least significant byte first msb to lsb */
- if (error == E1000_SUCCESS) {
- flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
- if (size == 1) {
- *data = (u8)(flash_data & 0x000000FF);
- } else if (size == 2) {
- *data = (u16)(flash_data & 0x0000FFFF);
- }
- break;
- } else {
- /* If we've gotten here, then things are probably completely hosed,
- * but if the error condition is detected, it won't hurt to give
- * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
- */
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcerr == 1) {
- /* Repeat for some time before giving up. */
- continue;
- } else if (hsfsts.hsf_status.flcdone == 0) {
- DEBUGOUT("Timeout error - flash cycle did not complete.");
- break;
- }
- }
- } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
-
- return error;
-}
-
-/******************************************************************************
- * Writes One /two bytes to the NVM using the ICH8 flash access registers.
- *
- * hw - The pointer to the hw structure
- * index - The index of the byte/word to read.
- * size - Size of data to read, 1=byte 2=word
- * data - The byte(s) to write to the NVM.
- *****************************************************************************/
-static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
- u16 data)
-{
- union ich8_hws_flash_status hsfsts;
- union ich8_hws_flash_ctrl hsflctl;
- u32 flash_linear_address;
- u32 flash_data = 0;
- s32 error = -E1000_ERR_EEPROM;
- s32 count = 0;
-
- DEBUGFUNC("e1000_write_ich8_data");
-
- if (size < 1 || size > 2 || data > size * 0xff ||
- index > ICH_FLASH_LINEAR_ADDR_MASK)
- return error;
-
- flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
- hw->flash_base_addr;
-
- do {
- udelay(1);
- /* Steps */
- error = e1000_ich8_cycle_init(hw);
- if (error != E1000_SUCCESS)
- break;
-
- hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
- /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
- hsflctl.hsf_ctrl.fldbcount = size -1;
- hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
- /* Write the last 24 bits of index into Flash Linear address field in
- * Flash Address */
- E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
-
- if (size == 1)
- flash_data = (u32)data & 0x00FF;
- else
- flash_data = (u32)data;
-
- E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
-
- /* check if FCERR is set to 1 , if set to 1, clear it and try the whole
- * sequence a few more times else done */
- error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
- if (error == E1000_SUCCESS) {
- break;
- } else {
- /* If we're here, then things are most likely completely hosed,
- * but if the error condition is detected, it won't hurt to give
- * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
- */
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcerr == 1) {
- /* Repeat for some time before giving up. */
- continue;
- } else if (hsfsts.hsf_status.flcdone == 0) {
- DEBUGOUT("Timeout error - flash cycle did not complete.");
- break;
- }
- }
- } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
-
- return error;
-}
-
-/******************************************************************************
- * Reads a single byte from the NVM using the ICH8 flash access registers.
- *
- * hw - pointer to e1000_hw structure
- * index - The index of the byte to read.
- * data - Pointer to a byte to store the value read.
- *****************************************************************************/
-static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data)
-{
- s32 status = E1000_SUCCESS;
- u16 word = 0;
-
- status = e1000_read_ich8_data(hw, index, 1, &word);
- if (status == E1000_SUCCESS) {
- *data = (u8)word;
- }
-
- return status;
-}
-
-/******************************************************************************
- * Writes a single byte to the NVM using the ICH8 flash access registers.
- * Performs verification by reading back the value and then going through
- * a retry algorithm before giving up.
- *
- * hw - pointer to e1000_hw structure
- * index - The index of the byte to write.
- * byte - The byte to write to the NVM.
- *****************************************************************************/
-static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
-{
- s32 error = E1000_SUCCESS;
- s32 program_retries = 0;
-
- DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
-
- error = e1000_write_ich8_byte(hw, index, byte);
-
- if (error != E1000_SUCCESS) {
- for (program_retries = 0; program_retries < 100; program_retries++) {
- DEBUGOUT2("Retrying \t Byte := %2.2X Offset := %d\n", byte, index);
- error = e1000_write_ich8_byte(hw, index, byte);
- udelay(100);
- if (error == E1000_SUCCESS)
- break;
- }
- }
-
- if (program_retries == 100)
- error = E1000_ERR_EEPROM;
-
- return error;
-}
-
-/******************************************************************************
- * Writes a single byte to the NVM using the ICH8 flash access registers.
- *
- * hw - pointer to e1000_hw structure
- * index - The index of the byte to read.
- * data - The byte to write to the NVM.
- *****************************************************************************/
-static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
-{
- s32 status = E1000_SUCCESS;
- u16 word = (u16)data;
-
- status = e1000_write_ich8_data(hw, index, 1, word);
-
- return status;
-}
-
-/******************************************************************************
- * Reads a word from the NVM using the ICH8 flash access registers.
- *
- * hw - pointer to e1000_hw structure
- * index - The starting byte index of the word to read.
- * data - Pointer to a word to store the value read.
- *****************************************************************************/
-static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
-{
- s32 status = E1000_SUCCESS;
- status = e1000_read_ich8_data(hw, index, 2, data);
- return status;
-}
-
-/******************************************************************************
- * Erases the bank specified. Each bank may be a 4, 8 or 64k block. Banks are 0
- * based.
- *
- * hw - pointer to e1000_hw structure
- * bank - 0 for first bank, 1 for second bank
- *
- * Note that this function may actually erase as much as 8 or 64 KBytes. The
- * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
- * bank size may be 4, 8 or 64 KBytes
- *****************************************************************************/
-static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
-{
- union ich8_hws_flash_status hsfsts;
- union ich8_hws_flash_ctrl hsflctl;
- u32 flash_linear_address;
- s32 count = 0;
- s32 error = E1000_ERR_EEPROM;
- s32 iteration;
- s32 sub_sector_size = 0;
- s32 bank_size;
- s32 j = 0;
- s32 error_flag = 0;
-
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
-
- /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
- /* 00: The Hw sector is 256 bytes, hence we need to erase 16
- * consecutive sectors. The start index for the nth Hw sector can be
- * calculated as bank * 4096 + n * 256
- * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
- * The start index for the nth Hw sector can be calculated
- * as bank * 4096
- * 10: The HW sector is 8K bytes
- * 11: The Hw sector size is 64K bytes */
- if (hsfsts.hsf_status.berasesz == 0x0) {
- /* Hw sector size 256 */
- sub_sector_size = ICH_FLASH_SEG_SIZE_256;
- bank_size = ICH_FLASH_SECTOR_SIZE;
- iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
- } else if (hsfsts.hsf_status.berasesz == 0x1) {
- bank_size = ICH_FLASH_SEG_SIZE_4K;
- iteration = 1;
- } else if (hsfsts.hsf_status.berasesz == 0x3) {
- bank_size = ICH_FLASH_SEG_SIZE_64K;
- iteration = 1;
- } else {
- return error;
- }
-
- for (j = 0; j < iteration ; j++) {
- do {
- count++;
- /* Steps */
- error = e1000_ich8_cycle_init(hw);
- if (error != E1000_SUCCESS) {
- error_flag = 1;
- break;
- }
-
- /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
- * Control */
- hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
- hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
-
- /* Write the last 24 bits of an index within the block into Flash
- * Linear address field in Flash Address. This probably needs to
- * be calculated here based off the on-chip erase sector size and
- * the software bank size (4, 8 or 64 KBytes) */
- flash_linear_address = bank * bank_size + j * sub_sector_size;
- flash_linear_address += hw->flash_base_addr;
- flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
-
- E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
-
- error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
- /* Check if FCERR is set to 1. If 1, clear it and try the whole
- * sequence a few more times else Done */
- if (error == E1000_SUCCESS) {
- break;
- } else {
- hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
- if (hsfsts.hsf_status.flcerr == 1) {
- /* repeat for some time before giving up */
- continue;
- } else if (hsfsts.hsf_status.flcdone == 0) {
- error_flag = 1;
- break;
- }
- }
- } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
- if (error_flag == 1)
- break;
- }
- if (error_flag != 1)
- error = E1000_SUCCESS;
- return error;
-}
-
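/*
 * Illustrative sketch (not part of the patch): the BERASE decoding in the
 * removed erase routine maps the two status bits to an erase granularity and
 * to how many erase cycles cover one 4 KB software bank.  The table below
 * restates that mapping standalone; the 8 KB case (berasesz == 0x2) is left
 * unsupported here exactly as in the removed code, and sub_sector_size is
 * only meaningful when more than one iteration is needed.
 */
#include <stdint.h>

struct erase_geometry {
        uint32_t sub_sector_size;       /* bytes erased per flash cycle */
        uint32_t bank_size;             /* bytes covered per software bank */
        int iterations;                 /* erase cycles per bank */
};

static int decode_berase(uint8_t berasesz, struct erase_geometry *g)
{
        switch (berasesz) {
        case 0x0:       /* 256-byte HW sectors: 16 erases per 4 KB bank */
                *g = (struct erase_geometry){ 256, 4096, 16 };
                return 0;
        case 0x1:       /* 4 KB HW sectors: one erase per bank */
                *g = (struct erase_geometry){ 4096, 4096, 1 };
                return 0;
        case 0x3:       /* 64 KB HW sectors: one erase covers the bank */
                *g = (struct erase_geometry){ 65536, 65536, 1 };
                return 0;
        default:        /* 0x2 (8 KB) not handled, mirroring the code above */
                return -1;
        }
}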
-static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
- u32 cnf_base_addr,
- u32 cnf_size)
-{
- u32 ret_val = E1000_SUCCESS;
- u16 word_addr, reg_data, reg_addr;
- u16 i;
-
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
-
- /* cnf_size is returned in size of dwords */
- for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
- if (ret_val)
- return ret_val;
-
- ret_val = e1000_get_software_flag(hw);
- if (ret_val != E1000_SUCCESS)
- return ret_val;
-
- ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
-
- e1000_release_software_flag(hw);
- }
-
- return ret_val;
-}
-
-
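/*
 * Illustrative sketch (not part of the patch): the removed config-region
 * loop walks the extended configuration area as pairs of 16-bit NVM words,
 * { PHY register data, PHY register address }, starting from a base that the
 * hardware reports in dwords.  The helpers below restate only that
 * addressing arithmetic and are illustrative.
 */
#include <stdint.h>

static inline uint16_t cfg_region_first_word(uint32_t cnf_base_addr_dwords)
{
        /* one dword = two 16-bit EEPROM words */
        return (uint16_t)(cnf_base_addr_dwords << 1);
}

static inline uint16_t cfg_entry_data_word(uint16_t first_word, uint16_t entry)
{
        return first_word + entry * 2;          /* word holding the PHY data */
}

static inline uint16_t cfg_entry_addr_word(uint16_t first_word, uint16_t entry)
{
        return first_word + entry * 2 + 1;      /* word holding the PHY address */
}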
-/******************************************************************************
- * This function initializes the PHY from the NVM on ICH8 platforms. This
- * is needed due to an issue where the NVM configuration is not properly
- * autoloaded after power transitions. Therefore, after each PHY reset, we
- * will load the configuration data out of the NVM manually.
- *
- * hw: Struct containing variables accessed by shared code
- *****************************************************************************/
-static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw)
-{
- u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
-
- if (hw->phy_type != e1000_phy_igp_3)
- return E1000_SUCCESS;
-
- /* Check if SW needs configure the PHY */
- reg_data = er32(FEXTNVM);
- if (!(reg_data & FEXTNVM_SW_CONFIG))
- return E1000_SUCCESS;
-
- /* Wait for basic configuration completes before proceeding*/
- loop = 0;
- do {
- reg_data = er32(STATUS) & E1000_STATUS_LAN_INIT_DONE;
- udelay(100);
- loop++;
- } while ((!reg_data) && (loop < 50));
-
- /* Clear the Init Done bit for the next init event */
- reg_data = er32(STATUS);
- reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
- ew32(STATUS, reg_data);
-
- /* Make sure HW does not configure LCD from PHY extended configuration
- before SW configuration */
- reg_data = er32(EXTCNF_CTRL);
- if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
- reg_data = er32(EXTCNF_SIZE);
- cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
- cnf_size >>= 16;
- if (cnf_size) {
- reg_data = er32(EXTCNF_CTRL);
- cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
- /* cnf_base_addr is in DWORD */
- cnf_base_addr >>= 16;
-
- /* Configure LCD from extended configuration region. */
- ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
- cnf_size);
- if (ret_val)
- return ret_val;
- }
- }
-
- return E1000_SUCCESS;
+ DEBUGFUNC("e1000_get_phy_cfg_done");
+ mdelay(10);
+ return E1000_SUCCESS;
}
-
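/*
 * Illustrative sketch (not part of the patch): the removed
 * e1000_init_lcd_from_nvm() combines two small patterns -- poll a status bit
 * for a bounded number of iterations, then extract a field from the upper
 * half of a register.  Both are restated standalone below; read_status() and
 * delay_us() are hypothetical callbacks, and the 50 x 100 us budget mirrors
 * the removed loop.
 */
#include <stdbool.h>
#include <stdint.h>

static bool poll_bit(uint32_t (*read_status)(void *ctx), void *ctx,
                     uint32_t bit, void (*delay_us)(unsigned int us),
                     int max_iterations)
{
        for (int i = 0; i < max_iterations; i++) {
                if (read_status(ctx) & bit)
                        return true;
                delay_us(100);
        }
        return false;
}

/* Both the EXTCNF_SIZE length and EXTCNF_CTRL pointer fields sit above bit 16. */
static inline uint32_t extcnf_upper_field(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> 16;
}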
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index a8866bdbb67..9acfddb0daf 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,7 +35,6 @@
#include "e1000_osdep.h"
-
/* Forward declarations of structures used by the shared code */
struct e1000_hw;
struct e1000_hw_stats;
@@ -43,252 +42,231 @@ struct e1000_hw_stats;
/* Enumerated types specific to the e1000 hardware */
/* Media Access Controlers */
typedef enum {
- e1000_undefined = 0,
- e1000_82542_rev2_0,
- e1000_82542_rev2_1,
- e1000_82543,
- e1000_82544,
- e1000_82540,
- e1000_82545,
- e1000_82545_rev_3,
- e1000_82546,
- e1000_82546_rev_3,
- e1000_82541,
- e1000_82541_rev_2,
- e1000_82547,
- e1000_82547_rev_2,
- e1000_82571,
- e1000_82572,
- e1000_82573,
- e1000_80003es2lan,
- e1000_ich8lan,
- e1000_num_macs
+ e1000_undefined = 0,
+ e1000_82542_rev2_0,
+ e1000_82542_rev2_1,
+ e1000_82543,
+ e1000_82544,
+ e1000_82540,
+ e1000_82545,
+ e1000_82545_rev_3,
+ e1000_82546,
+ e1000_82546_rev_3,
+ e1000_82541,
+ e1000_82541_rev_2,
+ e1000_82547,
+ e1000_82547_rev_2,
+ e1000_num_macs
} e1000_mac_type;
typedef enum {
- e1000_eeprom_uninitialized = 0,
- e1000_eeprom_spi,
- e1000_eeprom_microwire,
- e1000_eeprom_flash,
- e1000_eeprom_ich8,
- e1000_eeprom_none, /* No NVM support */
- e1000_num_eeprom_types
+ e1000_eeprom_uninitialized = 0,
+ e1000_eeprom_spi,
+ e1000_eeprom_microwire,
+ e1000_eeprom_flash,
+ e1000_eeprom_none, /* No NVM support */
+ e1000_num_eeprom_types
} e1000_eeprom_type;
/* Media Types */
typedef enum {
- e1000_media_type_copper = 0,
- e1000_media_type_fiber = 1,
- e1000_media_type_internal_serdes = 2,
- e1000_num_media_types
+ e1000_media_type_copper = 0,
+ e1000_media_type_fiber = 1,
+ e1000_media_type_internal_serdes = 2,
+ e1000_num_media_types
} e1000_media_type;
typedef enum {
- e1000_10_half = 0,
- e1000_10_full = 1,
- e1000_100_half = 2,
- e1000_100_full = 3
+ e1000_10_half = 0,
+ e1000_10_full = 1,
+ e1000_100_half = 2,
+ e1000_100_full = 3
} e1000_speed_duplex_type;
/* Flow Control Settings */
typedef enum {
- E1000_FC_NONE = 0,
- E1000_FC_RX_PAUSE = 1,
- E1000_FC_TX_PAUSE = 2,
- E1000_FC_FULL = 3,
- E1000_FC_DEFAULT = 0xFF
+ E1000_FC_NONE = 0,
+ E1000_FC_RX_PAUSE = 1,
+ E1000_FC_TX_PAUSE = 2,
+ E1000_FC_FULL = 3,
+ E1000_FC_DEFAULT = 0xFF
} e1000_fc_type;
struct e1000_shadow_ram {
- u16 eeprom_word;
- bool modified;
+ u16 eeprom_word;
+ bool modified;
};
/* PCI bus types */
typedef enum {
- e1000_bus_type_unknown = 0,
- e1000_bus_type_pci,
- e1000_bus_type_pcix,
- e1000_bus_type_pci_express,
- e1000_bus_type_reserved
+ e1000_bus_type_unknown = 0,
+ e1000_bus_type_pci,
+ e1000_bus_type_pcix,
+ e1000_bus_type_reserved
} e1000_bus_type;
/* PCI bus speeds */
typedef enum {
- e1000_bus_speed_unknown = 0,
- e1000_bus_speed_33,
- e1000_bus_speed_66,
- e1000_bus_speed_100,
- e1000_bus_speed_120,
- e1000_bus_speed_133,
- e1000_bus_speed_2500,
- e1000_bus_speed_reserved
+ e1000_bus_speed_unknown = 0,
+ e1000_bus_speed_33,
+ e1000_bus_speed_66,
+ e1000_bus_speed_100,
+ e1000_bus_speed_120,
+ e1000_bus_speed_133,
+ e1000_bus_speed_reserved
} e1000_bus_speed;
/* PCI bus widths */
typedef enum {
- e1000_bus_width_unknown = 0,
- /* These PCIe values should literally match the possible return values
- * from config space */
- e1000_bus_width_pciex_1 = 1,
- e1000_bus_width_pciex_2 = 2,
- e1000_bus_width_pciex_4 = 4,
- e1000_bus_width_32,
- e1000_bus_width_64,
- e1000_bus_width_reserved
+ e1000_bus_width_unknown = 0,
+ e1000_bus_width_32,
+ e1000_bus_width_64,
+ e1000_bus_width_reserved
} e1000_bus_width;
/* PHY status info structure and supporting enums */
typedef enum {
- e1000_cable_length_50 = 0,
- e1000_cable_length_50_80,
- e1000_cable_length_80_110,
- e1000_cable_length_110_140,
- e1000_cable_length_140,
- e1000_cable_length_undefined = 0xFF
+ e1000_cable_length_50 = 0,
+ e1000_cable_length_50_80,
+ e1000_cable_length_80_110,
+ e1000_cable_length_110_140,
+ e1000_cable_length_140,
+ e1000_cable_length_undefined = 0xFF
} e1000_cable_length;
typedef enum {
- e1000_gg_cable_length_60 = 0,
- e1000_gg_cable_length_60_115 = 1,
- e1000_gg_cable_length_115_150 = 2,
- e1000_gg_cable_length_150 = 4
+ e1000_gg_cable_length_60 = 0,
+ e1000_gg_cable_length_60_115 = 1,
+ e1000_gg_cable_length_115_150 = 2,
+ e1000_gg_cable_length_150 = 4
} e1000_gg_cable_length;
typedef enum {
- e1000_igp_cable_length_10 = 10,
- e1000_igp_cable_length_20 = 20,
- e1000_igp_cable_length_30 = 30,
- e1000_igp_cable_length_40 = 40,
- e1000_igp_cable_length_50 = 50,
- e1000_igp_cable_length_60 = 60,
- e1000_igp_cable_length_70 = 70,
- e1000_igp_cable_length_80 = 80,
- e1000_igp_cable_length_90 = 90,
- e1000_igp_cable_length_100 = 100,
- e1000_igp_cable_length_110 = 110,
- e1000_igp_cable_length_115 = 115,
- e1000_igp_cable_length_120 = 120,
- e1000_igp_cable_length_130 = 130,
- e1000_igp_cable_length_140 = 140,
- e1000_igp_cable_length_150 = 150,
- e1000_igp_cable_length_160 = 160,
- e1000_igp_cable_length_170 = 170,
- e1000_igp_cable_length_180 = 180
+ e1000_igp_cable_length_10 = 10,
+ e1000_igp_cable_length_20 = 20,
+ e1000_igp_cable_length_30 = 30,
+ e1000_igp_cable_length_40 = 40,
+ e1000_igp_cable_length_50 = 50,
+ e1000_igp_cable_length_60 = 60,
+ e1000_igp_cable_length_70 = 70,
+ e1000_igp_cable_length_80 = 80,
+ e1000_igp_cable_length_90 = 90,
+ e1000_igp_cable_length_100 = 100,
+ e1000_igp_cable_length_110 = 110,
+ e1000_igp_cable_length_115 = 115,
+ e1000_igp_cable_length_120 = 120,
+ e1000_igp_cable_length_130 = 130,
+ e1000_igp_cable_length_140 = 140,
+ e1000_igp_cable_length_150 = 150,
+ e1000_igp_cable_length_160 = 160,
+ e1000_igp_cable_length_170 = 170,
+ e1000_igp_cable_length_180 = 180
} e1000_igp_cable_length;
typedef enum {
- e1000_10bt_ext_dist_enable_normal = 0,
- e1000_10bt_ext_dist_enable_lower,
- e1000_10bt_ext_dist_enable_undefined = 0xFF
+ e1000_10bt_ext_dist_enable_normal = 0,
+ e1000_10bt_ext_dist_enable_lower,
+ e1000_10bt_ext_dist_enable_undefined = 0xFF
} e1000_10bt_ext_dist_enable;
typedef enum {
- e1000_rev_polarity_normal = 0,
- e1000_rev_polarity_reversed,
- e1000_rev_polarity_undefined = 0xFF
+ e1000_rev_polarity_normal = 0,
+ e1000_rev_polarity_reversed,
+ e1000_rev_polarity_undefined = 0xFF
} e1000_rev_polarity;
typedef enum {
- e1000_downshift_normal = 0,
- e1000_downshift_activated,
- e1000_downshift_undefined = 0xFF
+ e1000_downshift_normal = 0,
+ e1000_downshift_activated,
+ e1000_downshift_undefined = 0xFF
} e1000_downshift;
typedef enum {
- e1000_smart_speed_default = 0,
- e1000_smart_speed_on,
- e1000_smart_speed_off
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
} e1000_smart_speed;
typedef enum {
- e1000_polarity_reversal_enabled = 0,
- e1000_polarity_reversal_disabled,
- e1000_polarity_reversal_undefined = 0xFF
+ e1000_polarity_reversal_enabled = 0,
+ e1000_polarity_reversal_disabled,
+ e1000_polarity_reversal_undefined = 0xFF
} e1000_polarity_reversal;
typedef enum {
- e1000_auto_x_mode_manual_mdi = 0,
- e1000_auto_x_mode_manual_mdix,
- e1000_auto_x_mode_auto1,
- e1000_auto_x_mode_auto2,
- e1000_auto_x_mode_undefined = 0xFF
+ e1000_auto_x_mode_manual_mdi = 0,
+ e1000_auto_x_mode_manual_mdix,
+ e1000_auto_x_mode_auto1,
+ e1000_auto_x_mode_auto2,
+ e1000_auto_x_mode_undefined = 0xFF
} e1000_auto_x_mode;
typedef enum {
- e1000_1000t_rx_status_not_ok = 0,
- e1000_1000t_rx_status_ok,
- e1000_1000t_rx_status_undefined = 0xFF
+ e1000_1000t_rx_status_not_ok = 0,
+ e1000_1000t_rx_status_ok,
+ e1000_1000t_rx_status_undefined = 0xFF
} e1000_1000t_rx_status;
typedef enum {
e1000_phy_m88 = 0,
e1000_phy_igp,
- e1000_phy_igp_2,
- e1000_phy_gg82563,
- e1000_phy_igp_3,
- e1000_phy_ife,
e1000_phy_undefined = 0xFF
} e1000_phy_type;
typedef enum {
- e1000_ms_hw_default = 0,
- e1000_ms_force_master,
- e1000_ms_force_slave,
- e1000_ms_auto
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
} e1000_ms_type;
typedef enum {
- e1000_ffe_config_enabled = 0,
- e1000_ffe_config_active,
- e1000_ffe_config_blocked
+ e1000_ffe_config_enabled = 0,
+ e1000_ffe_config_active,
+ e1000_ffe_config_blocked
} e1000_ffe_config;
typedef enum {
- e1000_dsp_config_disabled = 0,
- e1000_dsp_config_enabled,
- e1000_dsp_config_activated,
- e1000_dsp_config_undefined = 0xFF
+ e1000_dsp_config_disabled = 0,
+ e1000_dsp_config_enabled,
+ e1000_dsp_config_activated,
+ e1000_dsp_config_undefined = 0xFF
} e1000_dsp_config;
struct e1000_phy_info {
- e1000_cable_length cable_length;
- e1000_10bt_ext_dist_enable extended_10bt_distance;
- e1000_rev_polarity cable_polarity;
- e1000_downshift downshift;
- e1000_polarity_reversal polarity_correction;
- e1000_auto_x_mode mdix_mode;
- e1000_1000t_rx_status local_rx;
- e1000_1000t_rx_status remote_rx;
+ e1000_cable_length cable_length;
+ e1000_10bt_ext_dist_enable extended_10bt_distance;
+ e1000_rev_polarity cable_polarity;
+ e1000_downshift downshift;
+ e1000_polarity_reversal polarity_correction;
+ e1000_auto_x_mode mdix_mode;
+ e1000_1000t_rx_status local_rx;
+ e1000_1000t_rx_status remote_rx;
};
struct e1000_phy_stats {
- u32 idle_errors;
- u32 receive_errors;
+ u32 idle_errors;
+ u32 receive_errors;
};
struct e1000_eeprom_info {
- e1000_eeprom_type type;
- u16 word_size;
- u16 opcode_bits;
- u16 address_bits;
- u16 delay_usec;
- u16 page_size;
- bool use_eerd;
- bool use_eewr;
+ e1000_eeprom_type type;
+ u16 word_size;
+ u16 opcode_bits;
+ u16 address_bits;
+ u16 delay_usec;
+ u16 page_size;
};
/* Flex ASF Information */
#define E1000_HOST_IF_MAX_SIZE 2048
typedef enum {
- e1000_byte_align = 0,
- e1000_word_align = 1,
- e1000_dword_align = 2
+ e1000_byte_align = 0,
+ e1000_word_align = 1,
+ e1000_dword_align = 2
} e1000_align_type;
-
-
/* Error Codes */
#define E1000_SUCCESS 0
#define E1000_ERR_EEPROM 1
@@ -301,7 +279,6 @@ typedef enum {
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
#define E1000_ERR_HOST_INTERFACE_COMMAND 11
#define E1000_BLK_PHY_RESET 12
-#define E1000_ERR_SWFW_SYNC 13
#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \
(((_value) & 0xff00) >> 8))
@@ -318,19 +295,17 @@ s32 e1000_setup_link(struct e1000_hw *hw);
s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
void e1000_config_collision_dist(struct e1000_hw *hw);
s32 e1000_check_for_link(struct e1000_hw *hw);
-s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex);
s32 e1000_force_mac_fc(struct e1000_hw *hw);
/* PHY */
-s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data);
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
s32 e1000_phy_hw_reset(struct e1000_hw *hw);
s32 e1000_phy_reset(struct e1000_hw *hw);
s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
-void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
-
/* EEPROM Functions */
s32 e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -338,66 +313,63 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw);
u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
#define E1000_MNG_IAMT_MODE 0x3
#define E1000_MNG_ICH_IAMT_MODE 0x2
-#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */
#define E1000_VFTA_ENTRY_SHIFT 0x5
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
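/*
 * Illustrative sketch (not part of the patch): the three VFTA constants above
 * describe how a 12-bit VLAN id selects one bit in the 128 x 32-bit VLAN
 * filter table -- the upper bits pick the register, the low five bits pick
 * the bit within it.  Values are repeated locally so the sketch stands alone.
 */
#include <stdint.h>

#define VFTA_ENTRY_SHIFT                0x5
#define VFTA_ENTRY_MASK                 0x7F
#define VFTA_ENTRY_BIT_SHIFT_MASK       0x1F

static inline uint32_t vfta_register_index(uint16_t vlan_id)
{
        return (vlan_id >> VFTA_ENTRY_SHIFT) & VFTA_ENTRY_MASK;
}

static inline uint32_t vfta_bit_mask(uint16_t vlan_id)
{
        return 1u << (vlan_id & VFTA_ENTRY_BIT_SHIFT_MASK);
}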
struct e1000_host_mng_command_header {
- u8 command_id;
- u8 checksum;
- u16 reserved1;
- u16 reserved2;
- u16 command_length;
+ u8 command_id;
+ u8 checksum;
+ u16 reserved1;
+ u16 reserved2;
+ u16 command_length;
};
struct e1000_host_mng_command_info {
- struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
- u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/
+ struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
+ u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658 */
};
#ifdef __BIG_ENDIAN
-struct e1000_host_mng_dhcp_cookie{
- u32 signature;
- u16 vlan_id;
- u8 reserved0;
- u8 status;
- u32 reserved1;
- u8 checksum;
- u8 reserved3;
- u16 reserved2;
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u16 vlan_id;
+ u8 reserved0;
+ u8 status;
+ u32 reserved1;
+ u8 checksum;
+ u8 reserved3;
+ u16 reserved2;
};
#else
-struct e1000_host_mng_dhcp_cookie{
- u32 signature;
- u8 status;
- u8 reserved0;
- u16 vlan_id;
- u32 reserved1;
- u16 reserved2;
- u8 reserved3;
- u8 checksum;
+struct e1000_host_mng_dhcp_cookie {
+ u32 signature;
+ u8 status;
+ u8 reserved0;
+ u16 vlan_id;
+ u32 reserved1;
+ u16 reserved2;
+ u8 reserved3;
+ u8 checksum;
};
#endif
-s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
- u16 length);
bool e1000_check_mng_mode(struct e1000_hw *hw);
-bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
-s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
-s32 e1000_read_mac_addr(struct e1000_hw * hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
/* Filters (multicast, vlan, receive) */
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
@@ -417,18 +389,15 @@ s32 e1000_blink_led_start(struct e1000_hw *hw);
/* Everything else */
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
-void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
+ u32 frame_len, u8 * mac_addr);
void e1000_get_bus_info(struct e1000_hw *hw);
void e1000_pci_set_mwi(struct e1000_hw *hw);
void e1000_pci_clear_mwi(struct e1000_hw *hw);
-s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
/* Port I/O is only supported on 82544 and newer */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
-s32 e1000_disable_pciex_master(struct e1000_hw *hw);
-s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
-
#define E1000_READ_REG_IO(a, reg) \
e1000_read_reg_io((a), E1000_##reg)
@@ -471,36 +440,7 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define E1000_DEV_ID_82547EI 0x1019
#define E1000_DEV_ID_82547EI_MOBILE 0x101A
-#define E1000_DEV_ID_82571EB_COPPER 0x105E
-#define E1000_DEV_ID_82571EB_FIBER 0x105F
-#define E1000_DEV_ID_82571EB_SERDES 0x1060
-#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
-#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
-#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5
-#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
-#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9
-#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA
-#define E1000_DEV_ID_82572EI_COPPER 0x107D
-#define E1000_DEV_ID_82572EI_FIBER 0x107E
-#define E1000_DEV_ID_82572EI_SERDES 0x107F
-#define E1000_DEV_ID_82572EI 0x10B9
-#define E1000_DEV_ID_82573E 0x108B
-#define E1000_DEV_ID_82573E_IAMT 0x108C
-#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
-#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
-#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
-#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
-#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
-
-#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
-#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
-#define E1000_DEV_ID_ICH8_IGP_C 0x104B
-#define E1000_DEV_ID_ICH8_IFE 0x104C
-#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
-#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
-#define E1000_DEV_ID_ICH8_IGP_M 0x104D
-
#define NODE_ADDRESS_SIZE 6
#define ETH_LENGTH_OF_ADDRESS 6
@@ -523,21 +463,20 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
/* The sizes (in bytes) of a ethernet packet */
#define ENET_HEADER_SIZE 14
-#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
#define ETHERNET_FCS_SIZE 4
#define MINIMUM_ETHERNET_PACKET_SIZE \
(MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
#define CRC_LENGTH ETHERNET_FCS_SIZE
#define MAX_JUMBO_FRAME_SIZE 0x3F00
-
/* 802.1q VLAN Packet Sizes */
-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
/* Ethertype field values */
-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
-#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
-#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
/* Packet Header defines */
#define IP_PROTOCOL_TCP 6
@@ -567,15 +506,6 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
E1000_IMS_RXSEQ | \
E1000_IMS_LSC)
-/* Additional interrupts need to be handled for e1000_ich8lan:
- DSW = The FW changed the status of the DISSW bit in FWSM
- PHYINT = The LAN connected device generates an interrupt
- EPRST = Manageability reset event */
-#define IMS_ICH8LAN_ENABLE_MASK (\
- E1000_IMS_DSW | \
- E1000_IMS_PHYINT | \
- E1000_IMS_EPRST)
-
/* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor. We
* reserve one of these spots for our directed address, allowing us room for
@@ -583,100 +513,98 @@ s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
*/
#define E1000_RAR_ENTRIES 15
-#define E1000_RAR_ENTRIES_ICH8LAN 6
-
#define MIN_NUMBER_OF_DESCRIPTORS 8
#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
/* Receive Descriptor */
struct e1000_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 special;
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
};
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
- struct {
- __le64 buffer_addr;
- __le64 reserved;
- } read;
- struct {
- struct {
- __le32 mrq; /* Multiple Rx Queues */
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length;
- __le16 vlan; /* VLAN tag */
- } upper;
- } wb; /* writeback */
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
};
#define MAX_PS_BUFFERS 4
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
- struct {
- /* one buffer for protocol header(s), three data buffers */
- __le64 buffer_addr[MAX_PS_BUFFERS];
- } read;
- struct {
- struct {
- __le32 mrq; /* Multiple Rx Queues */
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length0; /* length of buffer 0 */
- __le16 vlan; /* VLAN tag */
- } middle;
- struct {
- __le16 header_status;
- __le16 length[3]; /* length of buffers 1-3 */
- } upper;
- __le64 reserved;
- } wb; /* writeback */
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ __le16 length[3]; /* length of buffers 1-3 */
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
};
-/* Receive Decriptor bit definitions */
-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */
-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
-#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
-#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
-#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
-#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
-#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
-#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
-#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
-#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
-#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
-#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
-#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
-#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
#define E1000_RXD_SPC_PRI_SHIFT 13
-#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */
#define E1000_RXD_SPC_CFI_SHIFT 12
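/*
 * Illustrative sketch (not part of the patch): software owns a legacy receive
 * descriptor only once hardware has set Descriptor Done, and a frame fits in
 * a single buffer only when End of Packet is also set.  A minimal check using
 * the status bits defined above (values repeated locally so the sketch stands
 * alone):
 */
#include <stdbool.h>
#include <stdint.h>

#define RXD_STAT_DD     0x01    /* Descriptor Done */
#define RXD_STAT_EOP    0x02    /* End of Packet */

static inline bool rx_desc_done(uint8_t status)
{
        return status & RXD_STAT_DD;
}

static inline bool rx_desc_single_buffer_frame(uint8_t status)
{
        return (status & (RXD_STAT_DD | RXD_STAT_EOP)) ==
               (RXD_STAT_DD | RXD_STAT_EOP);
}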
#define E1000_RXDEXT_STATERR_CE 0x01000000
@@ -698,7 +626,6 @@ union e1000_rx_desc_packet_split {
E1000_RXD_ERR_CXE | \
E1000_RXD_ERR_RXE)
-
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
@@ -707,152 +634,145 @@ union e1000_rx_desc_packet_split {
E1000_RXDEXT_STATERR_CXE | \
E1000_RXDEXT_STATERR_RXE)
-
/* Transmit Descriptor */
struct e1000_tx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 cso; /* Checksum offset */
- u8 cmd; /* Descriptor control */
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 css; /* Checksum start */
- __le16 special;
- } fields;
- } upper;
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
};
/* Transmit Descriptor bit definitions */
-#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
-#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
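/*
 * Illustrative sketch (not part of the patch): in a legacy transmit
 * descriptor the 16-bit buffer length and the command bits share the 32-bit
 * "lower" word, as the defines above indicate.  A typical single-buffer,
 * report-status encoding (values repeated locally so the sketch stands
 * alone):
 */
#include <stdint.h>

#define TXD_CMD_EOP     0x01000000      /* End of Packet */
#define TXD_CMD_IFCS    0x02000000      /* Insert FCS */
#define TXD_CMD_RS      0x08000000      /* Report Status */

static inline uint32_t tx_desc_lower_word(uint16_t length)
{
        return (uint32_t)length | TXD_CMD_EOP | TXD_CMD_IFCS | TXD_CMD_RS;
}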
/* Offload Context Descriptor */
struct e1000_context_desc {
- union {
- __le32 ip_config;
- struct {
- u8 ipcss; /* IP checksum start */
- u8 ipcso; /* IP checksum offset */
- __le16 ipcse; /* IP checksum end */
- } ip_fields;
- } lower_setup;
- union {
- __le32 tcp_config;
- struct {
- u8 tucss; /* TCP checksum start */
- u8 tucso; /* TCP checksum offset */
- __le16 tucse; /* TCP checksum end */
- } tcp_fields;
- } upper_setup;
- __le32 cmd_and_length; /* */
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 hdr_len; /* Header length */
- __le16 mss; /* Maximum segment size */
- } fields;
- } tcp_seg_setup;
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length; /* */
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
};
/* Offload data descriptor */
struct e1000_data_desc {
- __le64 buffer_addr; /* Address of the descriptor's buffer address */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 typ_len_ext; /* */
- u8 cmd; /* */
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 popts; /* Packet Options */
- __le16 special; /* */
- } fields;
- } upper;
+ __le64 buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext; /* */
+ u8 cmd; /* */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special; /* */
+ } fields;
+ } upper;
};
/* Filters */
-#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
-#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
-#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-
-#define E1000_NUM_UNICAST_ICH8LAN 7
-#define E1000_MC_TBL_SIZE_ICH8LAN 32
-
+#define E1000_NUM_UNICAST 16 /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address Register */
struct e1000_rar {
- volatile __le32 low; /* receive address low */
- volatile __le32 high; /* receive address high */
+ volatile __le32 low; /* receive address low */
+ volatile __le32 high; /* receive address high */
};
/* Number of entries in the Multicast Table Array (MTA). */
#define E1000_NUM_MTA_REGISTERS 128
-#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
/* IPv4 Address Table Entry */
struct e1000_ipv4_at_entry {
- volatile u32 ipv4_addr; /* IP Address (RW) */
- volatile u32 reserved;
+ volatile u32 ipv4_addr; /* IP Address (RW) */
+ volatile u32 reserved;
};
/* Four wakeup IP addresses are supported */
#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
-#define E1000_IP4AT_SIZE_ICH8LAN 3
#define E1000_IP6AT_SIZE 1
/* IPv6 Address Table Entry */
struct e1000_ipv6_at_entry {
- volatile u8 ipv6_addr[16];
+ volatile u8 ipv6_addr[16];
};
/* Flexible Filter Length Table Entry */
struct e1000_fflt_entry {
- volatile u32 length; /* Flexible Filter Length (RW) */
- volatile u32 reserved;
+ volatile u32 length; /* Flexible Filter Length (RW) */
+ volatile u32 reserved;
};
/* Flexible Filter Mask Table Entry */
struct e1000_ffmt_entry {
- volatile u32 mask; /* Flexible Filter Mask (RW) */
- volatile u32 reserved;
+ volatile u32 mask; /* Flexible Filter Mask (RW) */
+ volatile u32 reserved;
};
/* Flexible Filter Value Table Entry */
struct e1000_ffvt_entry {
- volatile u32 value; /* Flexible Filter Value (RW) */
- volatile u32 reserved;
+ volatile u32 value; /* Flexible Filter Value (RW) */
+ volatile u32 reserved;
};
/* Four Flexible Filters are supported */
@@ -879,211 +799,211 @@ struct e1000_ffvt_entry {
* R/clr - register is read only and is cleared when read
* A - register array
*/
-#define E1000_CTRL 0x00000 /* Device Control - RW */
-#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
-#define E1000_STATUS 0x00008 /* Device Status - RO */
-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
-#define E1000_EERD 0x00014 /* EEPROM Read - RW */
-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
-#define E1000_FLA 0x0001C /* Flash Access - RW */
-#define E1000_MDIC 0x00020 /* MDI Control - RW */
-#define E1000_SCTL 0x00024 /* SerDes Control - RW */
-#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
-#define E1000_FCT 0x00030 /* Flow Control Type - RW */
-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
-#define E1000_RCTL 0x00100 /* RX Control - RW */
-#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
-#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
-#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
-#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
-#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
-#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
-#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
-#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
-#define E1000_TCTL 0x00400 /* TX Control - RW */
-#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
-#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
-#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
-#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
-#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
-#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
-#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* RX Control - RW */
+#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
+#define E1000_TCTL 0x00400 /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
+#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
+#define E1000_TBT 0x00448 /* TX Burst Timer - RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
#define FEXTNVM_SW_CONFIG 0x0001
-#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
-#define E1000_PBS 0x01008 /* Packet Buffer Size */
-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_FLASH_UPDATES 1000
-#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
-#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
-#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
-#define E1000_FLSWCTL 0x01030 /* FLASH control register */
-#define E1000_FLSWDATA 0x01034 /* FLASH data register */
-#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
-#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
-#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
-#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
-#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
-#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
-#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
-#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
-#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
-#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
-#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
-#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
-#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
-#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
-#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
-#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
-#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
-#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
-#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
-#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
-#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
-#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
-#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
-#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
-#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
-#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
-#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
-#define E1000_TDT 0x03818 /* TX Descripotr Tail - RW */
-#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
-#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
-#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
-#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
-#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
-#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
-#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
-#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
-#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
-#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
-#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
-#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
-#define E1000_COLC 0x04028 /* Collision Count - R/clr */
-#define E1000_DC 0x04030 /* Defer Count - R/clr */
-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
-#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
-#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
-#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
-#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
-#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
-#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
-#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
-#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
-#define E1000_RA 0x05400 /* Receive Address - RW Array */
-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
-#define E1000_WUC 0x05800 /* Wakeup Control - RW */
-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
-#define E1000_MANC 0x05820 /* Management Control - RW */
-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
-#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
-#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
-#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
-#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
-#define E1000_HOST_IF 0x08800 /* Host Interface */
-#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
-#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
-
-#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
-#define E1000_MDPHYA 0x0003C /* PHY address - RW */
-#define E1000_MANC2H 0x05860 /* Managment Control To Host - RW */
-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
-
-#define E1000_GCR 0x05B00 /* PCI-Ex Control */
-#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
-#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
-#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
-#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
-#define E1000_SWSM 0x05B50 /* SW Semaphore */
-#define E1000_FWSM 0x05B54 /* FW Semaphore */
-#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
-#define E1000_HICR 0x08F00 /* Host Inteface Control */
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT 0x01028 /* FLASH Timer Register */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL 0x01030 /* FLASH control register */
+#define E1000_FLSWDATA 0x01034 /* FLASH data register */
+#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
+#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA 0x0003C /* PHY address - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
/* RSS registers */
-#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
-#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
-#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
-#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
-#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
/* Register Set (82542)
*
* Some of the 82542 registers are located at different offsets than they are
@@ -1123,19 +1043,19 @@ struct e1000_ffvt_entry {
#define E1000_82542_RDLEN0 E1000_82542_RDLEN
#define E1000_82542_RDH0 E1000_82542_RDH
#define E1000_82542_RDT0 E1000_82542_RDT
-#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
- * RX Control - RW */
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+ * RX Control - RW */
#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
-#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
-#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */
-#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
-#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
-#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
-#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
-#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
-#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
-#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
-#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Base Low Queue 3 - RW */
+#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */
#define E1000_82542_RDTR1 0x00130
#define E1000_82542_RDBAL1 0x00138
#define E1000_82542_RDBAH1 0x0013C
@@ -1302,288 +1222,281 @@ struct e1000_ffvt_entry {
#define E1000_82542_RSSIR E1000_RSSIR
#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
-#define E1000_82542_MANC2H E1000_MANC2H
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
- u64 crcerrs;
- u64 algnerrc;
- u64 symerrs;
- u64 rxerrc;
- u64 txerrc;
- u64 mpc;
- u64 scc;
- u64 ecol;
- u64 mcc;
- u64 latecol;
- u64 colc;
- u64 dc;
- u64 tncrs;
- u64 sec;
- u64 cexterr;
- u64 rlec;
- u64 xonrxc;
- u64 xontxc;
- u64 xoffrxc;
- u64 xofftxc;
- u64 fcruc;
- u64 prc64;
- u64 prc127;
- u64 prc255;
- u64 prc511;
- u64 prc1023;
- u64 prc1522;
- u64 gprc;
- u64 bprc;
- u64 mprc;
- u64 gptc;
- u64 gorcl;
- u64 gorch;
- u64 gotcl;
- u64 gotch;
- u64 rnbc;
- u64 ruc;
- u64 rfc;
- u64 roc;
- u64 rlerrc;
- u64 rjc;
- u64 mgprc;
- u64 mgpdc;
- u64 mgptc;
- u64 torl;
- u64 torh;
- u64 totl;
- u64 toth;
- u64 tpr;
- u64 tpt;
- u64 ptc64;
- u64 ptc127;
- u64 ptc255;
- u64 ptc511;
- u64 ptc1023;
- u64 ptc1522;
- u64 mptc;
- u64 bptc;
- u64 tsctc;
- u64 tsctfc;
- u64 iac;
- u64 icrxptc;
- u64 icrxatc;
- u64 ictxptc;
- u64 ictxatc;
- u64 ictxqec;
- u64 ictxqmtc;
- u64 icrxdmtc;
- u64 icrxoc;
+ u64 crcerrs;
+ u64 algnerrc;
+ u64 symerrs;
+ u64 rxerrc;
+ u64 txerrc;
+ u64 mpc;
+ u64 scc;
+ u64 ecol;
+ u64 mcc;
+ u64 latecol;
+ u64 colc;
+ u64 dc;
+ u64 tncrs;
+ u64 sec;
+ u64 cexterr;
+ u64 rlec;
+ u64 xonrxc;
+ u64 xontxc;
+ u64 xoffrxc;
+ u64 xofftxc;
+ u64 fcruc;
+ u64 prc64;
+ u64 prc127;
+ u64 prc255;
+ u64 prc511;
+ u64 prc1023;
+ u64 prc1522;
+ u64 gprc;
+ u64 bprc;
+ u64 mprc;
+ u64 gptc;
+ u64 gorcl;
+ u64 gorch;
+ u64 gotcl;
+ u64 gotch;
+ u64 rnbc;
+ u64 ruc;
+ u64 rfc;
+ u64 roc;
+ u64 rlerrc;
+ u64 rjc;
+ u64 mgprc;
+ u64 mgpdc;
+ u64 mgptc;
+ u64 torl;
+ u64 torh;
+ u64 totl;
+ u64 toth;
+ u64 tpr;
+ u64 tpt;
+ u64 ptc64;
+ u64 ptc127;
+ u64 ptc255;
+ u64 ptc511;
+ u64 ptc1023;
+ u64 ptc1522;
+ u64 mptc;
+ u64 bptc;
+ u64 tsctc;
+ u64 tsctfc;
+ u64 iac;
+ u64 icrxptc;
+ u64 icrxatc;
+ u64 ictxptc;
+ u64 ictxatc;
+ u64 ictxqec;
+ u64 ictxqmtc;
+ u64 icrxdmtc;
+ u64 icrxoc;
};
/* Structure containing variables used by the shared code (e1000_hw.c) */
struct e1000_hw {
- u8 __iomem *hw_addr;
- u8 __iomem *flash_address;
- e1000_mac_type mac_type;
- e1000_phy_type phy_type;
- u32 phy_init_script;
- e1000_media_type media_type;
- void *back;
- struct e1000_shadow_ram *eeprom_shadow_ram;
- u32 flash_bank_size;
- u32 flash_base_addr;
- e1000_fc_type fc;
- e1000_bus_speed bus_speed;
- e1000_bus_width bus_width;
- e1000_bus_type bus_type;
+ u8 __iomem *hw_addr;
+ u8 __iomem *flash_address;
+ e1000_mac_type mac_type;
+ e1000_phy_type phy_type;
+ u32 phy_init_script;
+ e1000_media_type media_type;
+ void *back;
+ struct e1000_shadow_ram *eeprom_shadow_ram;
+ u32 flash_bank_size;
+ u32 flash_base_addr;
+ e1000_fc_type fc;
+ e1000_bus_speed bus_speed;
+ e1000_bus_width bus_width;
+ e1000_bus_type bus_type;
struct e1000_eeprom_info eeprom;
- e1000_ms_type master_slave;
- e1000_ms_type original_master_slave;
- e1000_ffe_config ffe_config_state;
- u32 asf_firmware_present;
- u32 eeprom_semaphore_present;
- u32 swfw_sync_present;
- u32 swfwhw_semaphore_present;
- unsigned long io_base;
- u32 phy_id;
- u32 phy_revision;
- u32 phy_addr;
- u32 original_fc;
- u32 txcw;
- u32 autoneg_failed;
- u32 max_frame_size;
- u32 min_frame_size;
- u32 mc_filter_type;
- u32 num_mc_addrs;
- u32 collision_delta;
- u32 tx_packet_delta;
- u32 ledctl_default;
- u32 ledctl_mode1;
- u32 ledctl_mode2;
- bool tx_pkt_filtering;
+ e1000_ms_type master_slave;
+ e1000_ms_type original_master_slave;
+ e1000_ffe_config ffe_config_state;
+ u32 asf_firmware_present;
+ u32 eeprom_semaphore_present;
+ unsigned long io_base;
+ u32 phy_id;
+ u32 phy_revision;
+ u32 phy_addr;
+ u32 original_fc;
+ u32 txcw;
+ u32 autoneg_failed;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u32 mc_filter_type;
+ u32 num_mc_addrs;
+ u32 collision_delta;
+ u32 tx_packet_delta;
+ u32 ledctl_default;
+ u32 ledctl_mode1;
+ u32 ledctl_mode2;
+ bool tx_pkt_filtering;
struct e1000_host_mng_dhcp_cookie mng_cookie;
- u16 phy_spd_default;
- u16 autoneg_advertised;
- u16 pci_cmd_word;
- u16 fc_high_water;
- u16 fc_low_water;
- u16 fc_pause_time;
- u16 current_ifs_val;
- u16 ifs_min_val;
- u16 ifs_max_val;
- u16 ifs_step_size;
- u16 ifs_ratio;
- u16 device_id;
- u16 vendor_id;
- u16 subsystem_id;
- u16 subsystem_vendor_id;
- u8 revision_id;
- u8 autoneg;
- u8 mdix;
- u8 forced_speed_duplex;
- u8 wait_autoneg_complete;
- u8 dma_fairness;
- u8 mac_addr[NODE_ADDRESS_SIZE];
- u8 perm_mac_addr[NODE_ADDRESS_SIZE];
- bool disable_polarity_correction;
- bool speed_downgraded;
- e1000_smart_speed smart_speed;
- e1000_dsp_config dsp_config_state;
- bool get_link_status;
- bool serdes_link_down;
- bool tbi_compatibility_en;
- bool tbi_compatibility_on;
- bool laa_is_present;
- bool phy_reset_disable;
- bool initialize_hw_bits_disable;
- bool fc_send_xon;
- bool fc_strict_ieee;
- bool report_tx_early;
- bool adaptive_ifs;
- bool ifs_params_forced;
- bool in_ifs_mode;
- bool mng_reg_access_disabled;
- bool leave_av_bit_off;
- bool kmrn_lock_loss_workaround_disabled;
- bool bad_tx_carr_stats_fd;
- bool has_manc2h;
- bool rx_needs_kicking;
- bool has_smbus;
+ u16 phy_spd_default;
+ u16 autoneg_advertised;
+ u16 pci_cmd_word;
+ u16 fc_high_water;
+ u16 fc_low_water;
+ u16 fc_pause_time;
+ u16 current_ifs_val;
+ u16 ifs_min_val;
+ u16 ifs_max_val;
+ u16 ifs_step_size;
+ u16 ifs_ratio;
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 autoneg;
+ u8 mdix;
+ u8 forced_speed_duplex;
+ u8 wait_autoneg_complete;
+ u8 dma_fairness;
+ u8 mac_addr[NODE_ADDRESS_SIZE];
+ u8 perm_mac_addr[NODE_ADDRESS_SIZE];
+ bool disable_polarity_correction;
+ bool speed_downgraded;
+ e1000_smart_speed smart_speed;
+ e1000_dsp_config dsp_config_state;
+ bool get_link_status;
+ bool serdes_has_link;
+ bool tbi_compatibility_en;
+ bool tbi_compatibility_on;
+ bool laa_is_present;
+ bool phy_reset_disable;
+ bool initialize_hw_bits_disable;
+ bool fc_send_xon;
+ bool fc_strict_ieee;
+ bool report_tx_early;
+ bool adaptive_ifs;
+ bool ifs_params_forced;
+ bool in_ifs_mode;
+ bool mng_reg_access_disabled;
+ bool leave_av_bit_off;
+ bool bad_tx_carr_stats_fd;
+ bool has_smbus;
};
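
For orientation only (not part of the patch): the register offsets above index into the MMIO BAR mapped at hw->hw_addr, so a raw access reduces to readl()/writel() on that base plus the offset. This is an illustrative sketch; the helper names are hypothetical, and the driver wraps the same pattern in its own accessor macros.

/* Illustrative sketch, assuming hw->hw_addr holds the mapped register BAR. */
static inline u32 e1000_example_read_reg(struct e1000_hw *hw, unsigned long offset)
{
	return readl(hw->hw_addr + offset);	/* e.g. offset == E1000_RCTL */
}

static inline void e1000_example_write_reg(struct e1000_hw *hw,
					   unsigned long offset, u32 value)
{
	writel(value, hw->hw_addr + offset);	/* e.g. offset == E1000_TCTL */
}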
-
-#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
-#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
-#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
-#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
-#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
-#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */
/* Register Bit Masks */
/* Device Control */
-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
-#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
-#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
-#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
-#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
-#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
-#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
-#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
-#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
-#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
-#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
-#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
-#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
-#define E1000_CTRL_RST 0x04000000 /* Global reset */
-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */
/* Device Status */
-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
-#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define E1000_STATUS_FUNC_SHIFT 2
-#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
-#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
-#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
+#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */
#define E1000_STATUS_SPEED_MASK 0x000000C0
-#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
-#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
- by EEPROM/Flash */
-#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
-#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
-#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
-#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
-#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
-#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
-#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
-#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
-#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
-#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
-#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
-#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
-#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion
+ by EEPROM/Flash */
+#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */
+#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */
+#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
#define E1000_STATUS_FUSE_8 0x04000000
#define E1000_STATUS_FUSE_9 0x08000000
-#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
-#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
+#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */
-/* Constants used to intrepret the masked PCI-X bus speed. */
-#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
-#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
-#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
/* EEPROM/Flash Control */
-#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
-#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
-#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
-#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */
#define E1000_EECD_FWE_MASK 0x00000030
-#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
-#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */
#define E1000_EECD_FWE_SHIFT 4
-#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
-#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
-#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
-#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
-#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
- * (0-small, 1-large) */
-#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+ * (0-small, 1-large) */
+#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
#ifndef E1000_EEPROM_GRANT_ATTEMPTS
-#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
-#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */
+#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEPROM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
-#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
-#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
-#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
-#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
-#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
-#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
-#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SECVAL_SHIFT 22
#define E1000_STM_OPCODE 0xDB00
#define E1000_HICR_FW_RESET 0xC0
@@ -1593,12 +1506,12 @@ struct e1000_hw {
#define E1000_ICH_NVM_SIG_MASK 0xC0
/* EEPROM Read */
-#define E1000_EERD_START 0x00000001 /* Start Read */
-#define E1000_EERD_DONE 0x00000010 /* Read Done */
+#define E1000_EERD_START 0x00000001 /* Start Read */
+#define E1000_EERD_DONE 0x00000010 /* Read Done */
#define E1000_EERD_ADDR_SHIFT 8
-#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
+#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */
#define E1000_EERD_DATA_SHIFT 16
-#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
+#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */
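
A hedged sketch of how the EERD fields above combine for a polled EEPROM word read (the shipping helper lives in e1000_hw.c; the function name, poll budget, and error value here are assumptions for illustration only):

/* Hypothetical polled read via the EERD register defined earlier in this header. */
static int e1000_example_eerd_read(struct e1000_hw *hw, u16 word, u16 *data)
{
	u32 eerd;
	int i;

	/* Latch the word address and kick off the read. */
	writel(((u32)word << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START,
	       hw->hw_addr + E1000_EERD);

	for (i = 0; i < 1000; i++) {		/* arbitrary poll budget */
		eerd = readl(hw->hw_addr + E1000_EERD);
		if (eerd & E1000_EERD_DONE) {
			*data = (u16)(eerd >> E1000_EERD_DATA_SHIFT);
			return 0;
		}
		udelay(5);
	}
	return -1;				/* timed out */
}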
/* SPI EEPROM Status Register */
#define EEPROM_STATUS_RDY_SPI 0x01
@@ -1608,25 +1521,25 @@ struct e1000_hw {
#define EEPROM_STATUS_WPEN_SPI 0x80
/* Extended Device Control */
-#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
-#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
-#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
-#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
-#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
-#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
+#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA
-#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */
-#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
-#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
-#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
-#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
-#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
-#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
-#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
-#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
-#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
-#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@@ -1638,11 +1551,11 @@ struct e1000_hw {
#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
-#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
-#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
-#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000
/* MDI Control */
@@ -1742,167 +1655,167 @@ struct e1000_hw {
#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Receive Address */
-#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
/* Interrupt Cause Read */
-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
-#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXO 0x00000040 /* rx overrun */
-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
-#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
-#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
-#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
-#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
-#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
-#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
#define E1000_ICR_TXD_LOW 0x00008000
#define E1000_ICR_SRPD 0x00010000
-#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
-#define E1000_ICR_MNG 0x00040000 /* Manageability event */
-#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
-#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
-#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
-#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
-#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
-#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
-#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
-#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */
+#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */
/* Interrupt Cause Set */
-#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
-#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
-#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW
#define E1000_ICS_SRPD E1000_ICR_SRPD
-#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
-#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
-#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
#define E1000_ICS_DSW E1000_ICR_DSW
#define E1000_ICS_PHYINT E1000_ICR_PHYINT
#define E1000_ICS_EPRST E1000_ICR_EPRST
/* Interrupt Mask Set */
-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
-#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
#define E1000_IMS_SRPD E1000_ICR_SRPD
-#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
-#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
-#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
#define E1000_IMS_DSW E1000_ICR_DSW
#define E1000_IMS_PHYINT E1000_ICR_PHYINT
#define E1000_IMS_EPRST E1000_ICR_EPRST
/* Interrupt Mask Clear */
-#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
-#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
-#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
-#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
-#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
-#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
-#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
-#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
+#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */
+#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
+#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
+#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */
+#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */
+#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */
+#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */
+#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */
+#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */
#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW
#define E1000_IMC_SRPD E1000_ICR_SRPD
-#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
-#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
-#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
-#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
-#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
-#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
-#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
-#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
-#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */
+#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */
+#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
#define E1000_IMC_DSW E1000_ICR_DSW
#define E1000_IMC_PHYINT E1000_ICR_PHYINT
#define E1000_IMC_EPRST E1000_ICR_EPRST
/* Receive Control */
-#define E1000_RCTL_RST 0x00000001 /* Software reset */
-#define E1000_RCTL_EN 0x00000002 /* enable */
-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
-#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
-#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
-#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
-#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
-#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
-#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
-#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
-#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
-#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
-#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
-#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
-#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
-#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
-#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
-#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
-#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
-#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
-#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
+#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */
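
As a purely illustrative example of how these RCTL bits compose (the value chosen here is arbitrary and not taken from the patch): enabling the receiver with broadcast accept, CRC stripping, and 4096-byte buffers requires the BSEX extension bit alongside the size field.

	u32 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SECRC |
		   E1000_RCTL_BSEX | E1000_RCTL_SZ_4096;	/* SZ_4096 is only valid with BSEX set */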
/* Use byte values for the following shift parameters
* Usage:
@@ -1925,10 +1838,10 @@ struct e1000_hw {
#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
/* SW_W_SYNC definitions */
#define E1000_SWFW_EEP_SM 0x0001
@@ -1937,17 +1850,17 @@ struct e1000_hw {
#define E1000_SWFW_MAC_CSR_SM 0x0008
/* Receive Descriptor */
-#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
-#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
-#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
-#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
-#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
+#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */
+#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */
+#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */
/* Flow Control */
-#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
-#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
-#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
/* Header split receive */
#define E1000_RFCTL_ISCSI_DIS 0x00000001
@@ -1967,66 +1880,64 @@ struct e1000_hw {
#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
/* Receive Descriptor Control */
-#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
-#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
-#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
-#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
/* Transmit Descriptor Control */
-#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
-#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
-#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
-#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
-#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
-#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
-#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
- still to be processed. */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+ still to be processed. */
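[Editor's note] A small illustration of how the TXDCTL fields above compose. The comment on GRAN is read here as "thresholds are counted in descriptors when the bit is set" (an assumption of this sketch), and the threshold values are placeholders, not tuning advice. Note that GRAN plus WTHRESH=1 reproduces the E1000_TXDCTL_FULL_TX_DESC_WB value defined above.

#include <stdint.h>
#include <stdio.h>

#define E1000_TXDCTL_PTHRESH 0x0000003F
#define E1000_TXDCTL_HTHRESH 0x00003F00
#define E1000_TXDCTL_WTHRESH 0x003F0000
#define E1000_TXDCTL_GRAN    0x01000000

/* Pack prefetch/host/write-back thresholds into a TXDCTL value. */
static uint32_t txdctl_build(uint32_t pthresh, uint32_t hthresh, uint32_t wthresh)
{
	uint32_t txdctl = E1000_TXDCTL_GRAN;   /* assumed: thresholds in descriptors */

	txdctl |=  pthresh        & E1000_TXDCTL_PTHRESH;
	txdctl |= (hthresh << 8)  & E1000_TXDCTL_HTHRESH;
	txdctl |= (wthresh << 16) & E1000_TXDCTL_WTHRESH;
	return txdctl;
}

int main(void)
{
	/* GRAN=1, WTHRESH=1 yields 0x01010000, i.e. E1000_TXDCTL_FULL_TX_DESC_WB */
	printf("TXDCTL = 0x%08x\n", txdctl_build(0, 0, 1));
	return 0;
}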
/* Transmit Configuration Word */
-#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
-#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
-#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
-#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
-#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
-#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
-#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
-#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW asym pause direction */

+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */
+#define E1000_TXCW_NP 0x00008000 /* TXCW next page */
+#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */
+#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
/* Receive Configuration Word */
-#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
-#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
-#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
-#define E1000_RXCW_CC 0x10000000 /* Receive config change */
-#define E1000_RXCW_C 0x20000000 /* Receive config */
-#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
-#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_CC 0x10000000 /* Receive config change */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */
/* Transmit Control */
-#define E1000_TCTL_RST 0x00000001 /* software reset */
-#define E1000_TCTL_EN 0x00000002 /* enable tx */
-#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
-#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
-#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
+#define E1000_TCTL_RST 0x00000001 /* software reset */
+#define E1000_TCTL_EN 0x00000002 /* enable tx */
+#define E1000_TCTL_BCE 0x00000004 /* busy check enable */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */
+#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
/* Extended Transmit Control */
-#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-
-#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX 0x00010000
+#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
/* Receive Checksum Control */
-#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
-#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
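[Editor's note] As an illustration of how these bits are typically combined, a standalone sketch that enables IPv4 and TCP/UDP receive checksum offload in a software copy of the register; the real register read/write accessors are not shown and the starting value of zero merely stands in for the value read back from hardware.

#include <stdint.h>
#include <stdio.h>

#define E1000_RXCSUM_PCSS_MASK 0x000000FF
#define E1000_RXCSUM_IPOFL     0x00000100
#define E1000_RXCSUM_TUOFL     0x00000200

int main(void)
{
	uint32_t rxcsum = 0;                 /* stand-in for the value read from RXCSUM */

	rxcsum &= ~E1000_RXCSUM_PCSS_MASK;   /* start checksumming at packet byte 0 */
	rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL;
	printf("RXCSUM = 0x%08x\n", rxcsum);
	return 0;
}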
/* Multiple Receive Queue Control */
#define E1000_MRQC_ENABLE_MASK 0x00000003
@@ -2042,141 +1953,141 @@ struct e1000_hw {
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
-#define E1000_WUC_APME 0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
-#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
-#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
-#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM 0x80000000 /* Enable SPM */
/* Wake Up Filter Control */
-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
-#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
-#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
-#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
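[Editor's note] A short sketch of building a wake-up filter value from the bits above. The helper name and the chosen options (magic packet plus link change) are hypothetical example configuration, not a recommendation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define E1000_WUFC_LNKC 0x00000001
#define E1000_WUFC_MAG  0x00000002
#define E1000_WUFC_EX   0x00000004
#define E1000_WUFC_BC   0x00000010

/* Translate simple wake options into a WUFC bit mask. */
static uint32_t wufc_from_options(bool magic, bool link_change, bool unicast, bool broadcast)
{
	uint32_t wufc = 0;

	if (magic)
		wufc |= E1000_WUFC_MAG;
	if (link_change)
		wufc |= E1000_WUFC_LNKC;
	if (unicast)
		wufc |= E1000_WUFC_EX;
	if (broadcast)
		wufc |= E1000_WUFC_BC;
	return wufc;
}

int main(void)
{
	printf("WUFC = 0x%08x\n", wufc_from_options(true, true, false, false));
	return 0;
}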
/* Wake Up Status */
-#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
-#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
-#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
-#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
-#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
-#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
-#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
-#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
-#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
-#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
-#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
-#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
-#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC 0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
/* Management Control */
-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
-#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */
-#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */
-#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
-#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
-#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
-#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
-#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
- * Filtering */
-#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
-#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
-#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
- * filtering */
-#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
- * memory */
-#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
- * filtering */
-#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
-#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
-#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
-#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
-#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
-#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
-#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
-#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
-
-#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
-#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN 0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery
+ * Filtering */
+#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
+ * filtering */
+#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
+ * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
+ * filtering */
+#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */
/* SW Semaphore Register */
-#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
-#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
-#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
-#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
/* FW Semaphore Register */
-#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */
#define E1000_FWSM_MODE_SHIFT 1
-#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
+#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */
-#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
-#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
-#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
+#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */
#define E1000_FWSM_SKUEL_SHIFT 29
-#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
-#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
-#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
-#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */
+#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
/* FFLT Debug Register */
-#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
+#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */
typedef enum {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_interface_only
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_interface_only
} e1000_mng_mode;
-/* Host Inteface Control Register */
-#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
-#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
- * to put command in RAM */
-#define E1000_HICR_SV 0x00000004 /* Status Validity */
-#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
+/* Host Interface Control Register */
+#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */
+#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done
+ * to put command in RAM */
+#define E1000_HICR_SV 0x00000004 /* Status Validity */
+#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */
/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
-#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
-#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
-#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
-#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
+#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
struct e1000_host_command_header {
- u8 command_id;
- u8 command_length;
- u8 command_options; /* I/F bits for command, status for return */
- u8 checksum;
+ u8 command_id;
+ u8 command_length;
+ u8 command_options; /* I/F bits for command, status for return */
+ u8 checksum;
};
struct e1000_host_command_info {
- struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
- u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
+ struct e1000_host_command_header command_header; /* Command Head/Command Result Head is 4 bytes */
+ u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can be 0..252 bytes long */
};
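[Editor's note] To make the framing concrete, a hedged standalone sketch that fills the header, copies a payload into command_data, and sets the 8-bit checksum using the usual sum-to-zero convention (the convention used by the driver's management checksum helper in e1000_hw.c). Exactly which bytes the checksum must cover is not stated in this header, so covering header plus data here is an assumption, and the command id and payload are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define E1000_HI_MAX_DATA_LENGTH 252

struct e1000_host_command_header {
	uint8_t command_id;
	uint8_t command_length;
	uint8_t command_options;
	uint8_t checksum;
};

struct e1000_host_command_info {
	struct e1000_host_command_header command_header;
	uint8_t command_data[E1000_HI_MAX_DATA_LENGTH];
};

/* Sum bytes modulo 256. */
static uint8_t sum_bytes(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	while (len--)
		sum += *buf++;
	return sum;
}

int main(void)
{
	struct e1000_host_command_info cmd;
	const char payload[] = "example";      /* hypothetical payload */

	memset(&cmd, 0, sizeof(cmd));
	cmd.command_header.command_id = 0x01;  /* hypothetical command id */
	cmd.command_header.command_length = sizeof(payload);
	memcpy(cmd.command_data, payload, sizeof(payload));

	/* checksum chosen so the bytes of the block sum to zero (assumed span) */
	cmd.command_header.checksum = 0;
	cmd.command_header.checksum =
		(uint8_t)(0 - sum_bytes((const uint8_t *)&cmd,
					sizeof(cmd.command_header) + sizeof(payload)));

	printf("checksum = 0x%02x\n", cmd.command_header.checksum);
	return 0;
}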
/* Host SMB register #0 */
-#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
-#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
-#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
-#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
+#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */
/* Host SMB register #1 */
#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN
@@ -2185,10 +2096,10 @@ struct e1000_host_command_info {
#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT
/* FW Status Register */
-#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
+#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */
/* Wake Up Packet Length */
-#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */
#define E1000_MDALIGN 4096
@@ -2242,24 +2153,24 @@ struct e1000_host_command_info {
#define PCI_EX_LINK_WIDTH_SHIFT 4
/* EEPROM Commands - Microwire */
-#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
-#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
-#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
-#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */
+#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */
/* EEPROM Commands - SPI */
-#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
-#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
-#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
-#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
-#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
-#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
-#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
-#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
-#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
-#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
+#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
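[Editor's note] The A8 opcode bit is the only non-obvious member of this set: on SPI parts that use 8-bit addressing, opcode bit 3 carries address bit 8. A small sketch of how a read opcode could be formed under that assumption; the 128-word threshold assumes 16-bit words, so word offset 128 corresponds to byte address 256.

#include <stdint.h>
#include <stdio.h>

#define EEPROM_READ_OPCODE_SPI 0x03
#define EEPROM_A8_OPCODE_SPI   0x08

/* Build the one-byte SPI read opcode for a part with 8-bit addressing. */
static uint8_t spi_read_opcode(uint16_t word_offset)
{
	uint8_t op = EEPROM_READ_OPCODE_SPI;

	if (word_offset >= 128)          /* byte address bit 8 goes into the opcode */
		op |= EEPROM_A8_OPCODE_SPI;
	return op;
}

int main(void)
{
	printf("offset 0x010 -> opcode 0x%02x\n", spi_read_opcode(0x010));
	printf("offset 0x100 -> opcode 0x%02x\n", spi_read_opcode(0x100));
	return 0;
}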
/* EEPROM Size definitions */
#define EEPROM_WORD_SIZE_SHIFT 6
@@ -2270,7 +2181,7 @@ struct e1000_host_command_info {
#define EEPROM_COMPAT 0x0003
#define EEPROM_ID_LED_SETTINGS 0x0004
#define EEPROM_VERSION 0x0005
-#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
#define EEPROM_PHY_CLASS_WORD 0x0007
#define EEPROM_INIT_CONTROL1_REG 0x000A
#define EEPROM_INIT_CONTROL2_REG 0x000F
@@ -2283,22 +2194,16 @@ struct e1000_host_command_info {
#define EEPROM_FLASH_VERSION 0x0032
#define EEPROM_CHECKSUM_REG 0x003F
-#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
-#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
+#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */
/* Word definitions for ID LED Settings */
#define ID_LED_RESERVED_0000 0x0000
#define ID_LED_RESERVED_FFFF 0xFFFF
-#define ID_LED_RESERVED_82573 0xF746
-#define ID_LED_DEFAULT_82573 0x1811
#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
-#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_OFF2 << 8) | \
- (ID_LED_DEF1_ON2 << 4) | \
- (ID_LED_DEF1_DEF2))
#define ID_LED_DEF1_DEF2 0x1
#define ID_LED_DEF1_ON2 0x2
#define ID_LED_DEF1_OFF2 0x3
@@ -2313,7 +2218,6 @@ struct e1000_host_command_info {
#define IGP_ACTIVITY_LED_ENABLE 0x0300
#define IGP_LED3_MODE 0x07000000
-
/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F
@@ -2384,11 +2288,8 @@ struct e1000_host_command_info {
#define DEFAULT_82542_TIPG_IPGR2 10
#define DEFAULT_82543_TIPG_IPGR2 6
-#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
#define E1000_TIPG_IPGR2_SHIFT 20
-#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
-#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000 0x00000008
#define E1000_TXDMAC_DPP 0x00000001
/* Adaptive IFS defines */
@@ -2421,9 +2322,9 @@ struct e1000_host_command_info {
#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
/* PBA constants */
-#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
-#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
-#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
+#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_20K 0x0014
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
@@ -2432,7 +2333,7 @@ struct e1000_host_command_info {
#define E1000_PBA_34K 0x0022
#define E1000_PBA_38K 0x0026
#define E1000_PBA_40K 0x0028
-#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
+#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
#define E1000_PBS_16K E1000_PBA_16K
@@ -2442,9 +2343,9 @@ struct e1000_host_command_info {
#define FLOW_CONTROL_TYPE 0x8808
/* The historical defaults for the flow control values are given below. */
-#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
-#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
-#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
+#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
+#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
+#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
/* PCIX Config space */
#define PCIX_COMMAND_REGISTER 0xE6
@@ -2458,7 +2359,6 @@ struct e1000_host_command_info {
#define PCIX_STATUS_HI_MMRBC_4K 0x3
#define PCIX_STATUS_HI_MMRBC_2K 0x2
-
/* Number of bits required to shift right the "pause" bits from the
* EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
*/
@@ -2479,14 +2379,11 @@ struct e1000_host_command_info {
*/
#define ILOS_SHIFT 3
-
#define RECEIVE_BUFFER_ALIGN_SIZE (256)
/* Number of milliseconds we wait for auto-negotiation to complete */
#define LINK_UP_TIMEOUT 500
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */
#define AUTO_READ_DONE_TIMEOUT 10
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
@@ -2534,7 +2431,6 @@ struct e1000_host_command_info {
(((length) > (adapter)->min_frame_size) && \
((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
-
/* Structures, enums, and macros for the PHY */
/* Bit definitions for the Management Data IO (MDIO) and Management Data
@@ -2551,49 +2447,49 @@ struct e1000_host_command_info {
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
-#define PHY_CTRL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Regiser */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
-#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
-#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
-
-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
-#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
+#define PHY_CTRL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */
/* M88E1000 Specific Registers */
-#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
-#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
-#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
-#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
-#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
-
-#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
-#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
-#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
-#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
-#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */
#define IGP01E1000_IEEE_REGS_PAGE 0x0000
#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
#define IGP01E1000_IEEE_FORCE_GIGA 0x0140
/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
-#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
-#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
#define IGP02E1000_PHY_POWER_MGMT 0x19
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */
/* IGP01E1000 AGC Registers - stores the cable length values*/
#define IGP01E1000_PHY_AGC_A 0x1172
@@ -2636,192 +2532,119 @@ struct e1000_host_command_info {
#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0
-/* Bits...
- * 15-5: page
- * 4-0: register offset
- */
-#define GG82563_PAGE_SHIFT 5
-#define GG82563_REG(page, reg) \
- (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
-#define GG82563_MIN_ALT_REG 30
-
-/* GG82563 Specific Registers */
-#define GG82563_PHY_SPEC_CTRL \
- GG82563_REG(0, 16) /* PHY Specific Control */
-#define GG82563_PHY_SPEC_STATUS \
- GG82563_REG(0, 17) /* PHY Specific Status */
-#define GG82563_PHY_INT_ENABLE \
- GG82563_REG(0, 18) /* Interrupt Enable */
-#define GG82563_PHY_SPEC_STATUS_2 \
- GG82563_REG(0, 19) /* PHY Specific Status 2 */
-#define GG82563_PHY_RX_ERR_CNTR \
- GG82563_REG(0, 21) /* Receive Error Counter */
-#define GG82563_PHY_PAGE_SELECT \
- GG82563_REG(0, 22) /* Page Select */
-#define GG82563_PHY_SPEC_CTRL_2 \
- GG82563_REG(0, 26) /* PHY Specific Control 2 */
-#define GG82563_PHY_PAGE_SELECT_ALT \
- GG82563_REG(0, 29) /* Alternate Page Select */
-#define GG82563_PHY_TEST_CLK_CTRL \
- GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
-
-#define GG82563_PHY_MAC_SPEC_CTRL \
- GG82563_REG(2, 21) /* MAC Specific Control Register */
-#define GG82563_PHY_MAC_SPEC_CTRL_2 \
- GG82563_REG(2, 26) /* MAC Specific Control 2 */
-
-#define GG82563_PHY_DSP_DISTANCE \
- GG82563_REG(5, 26) /* DSP Distance */
-
-/* Page 193 - Port Control Registers */
-#define GG82563_PHY_KMRN_MODE_CTRL \
- GG82563_REG(193, 16) /* Kumeran Mode Control */
-#define GG82563_PHY_PORT_RESET \
- GG82563_REG(193, 17) /* Port Reset */
-#define GG82563_PHY_REVISION_ID \
- GG82563_REG(193, 18) /* Revision ID */
-#define GG82563_PHY_DEVICE_ID \
- GG82563_REG(193, 19) /* Device ID */
-#define GG82563_PHY_PWR_MGMT_CTRL \
- GG82563_REG(193, 20) /* Power Management Control */
-#define GG82563_PHY_RATE_ADAPT_CTRL \
- GG82563_REG(193, 25) /* Rate Adaptation Control */
-
-/* Page 194 - KMRN Registers */
-#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
- GG82563_REG(194, 16) /* FIFO's Control/Status */
-#define GG82563_PHY_KMRN_CTRL \
- GG82563_REG(194, 17) /* Control */
-#define GG82563_PHY_INBAND_CTRL \
- GG82563_REG(194, 18) /* Inband Control */
-#define GG82563_PHY_KMRN_DIAGNOSTIC \
- GG82563_REG(194, 19) /* Diagnostic */
-#define GG82563_PHY_ACK_TIMEOUTS \
- GG82563_REG(194, 20) /* Acknowledge Timeouts */
-#define GG82563_PHY_ADV_ABILITY \
- GG82563_REG(194, 21) /* Advertised Ability */
-#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
- GG82563_REG(194, 23) /* Link Partner Advertised Ability */
-#define GG82563_PHY_ADV_NEXT_PAGE \
- GG82563_REG(194, 24) /* Advertised Next Page */
-#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
- GG82563_REG(194, 25) /* Link Partner Advertised Next page */
-#define GG82563_PHY_KMRN_MISC \
- GG82563_REG(194, 26) /* Misc. */
-
/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
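[Editor's note] Because the speed-select field is split across bits 6 and 13, forcing a speed means OR-ing the MSB/LSB defines exactly as the "bits 6,13: 10=1000, 01=100, 00=10" comments describe. The hypothetical helper below shows only that bit arithmetic; forcing gigabit with auto-negotiation disabled is not actually valid on 1000BASE-T.

#include <stdint.h>
#include <stdio.h>

#define MII_CR_SPEED_SELECT_MSB 0x0040   /* bit 6 */
#define MII_CR_FULL_DUPLEX      0x0100
#define MII_CR_SPEED_SELECT_LSB 0x2000   /* bit 13 */

/* Compose a PHY control value forcing the given speed and duplex. */
static uint16_t mii_force(unsigned int mbps, int full_duplex)
{
	uint16_t cr = 0;                     /* auto-negotiation left disabled */

	if (mbps == 1000)
		cr |= MII_CR_SPEED_SELECT_MSB;   /* speed bits = 10b */
	else if (mbps == 100)
		cr |= MII_CR_SPEED_SELECT_LSB;   /* speed bits = 01b */
	/* 10 Mb/s: both speed bits clear (00b) */
	if (full_duplex)
		cr |= MII_CR_FULL_DUPLEX;
	return cr;
}

int main(void)
{
	printf("force 100/full -> PHY_CTRL = 0x%04x\n", mii_force(100, 1));
	return 0;
}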
/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
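[Editor's note] MII_SR_LINK_STATUS is a latched-low bit, which is why link polling conventionally reads PHY_STATUS twice and trusts the second value. A self-contained sketch of that idiom, with a faked register file standing in for the real MDIO access path.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PHY_STATUS              0x01
#define MII_SR_LINK_STATUS      0x0004
#define MII_SR_AUTONEG_COMPLETE 0x0020

/* Stand-in for MDIO access in this sketch only. */
static uint16_t fake_phy_regs[32] = {
	[PHY_STATUS] = MII_SR_LINK_STATUS | MII_SR_AUTONEG_COMPLETE,
};

static uint16_t phy_read(uint8_t reg)
{
	return fake_phy_regs[reg & 0x1F];
}

static bool phy_link_up(void)
{
	/* first read clears the latched-low value, second read is trusted */
	(void)phy_read(PHY_STATUS);
	return (phy_read(PHY_STATUS) & MII_SR_LINK_STATUS) != 0;
}

int main(void)
{
	printf("link %s\n", phy_link_up() ? "up" : "down");
	return 0;
}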
/* Autoneg Advertisement Register */
-#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
-#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
-#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
-#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
-#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
-#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
-#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
-#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
-#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
-#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
/* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
-#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */
-#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */
-#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */
-#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* New page received from LP */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault occurred */
/* Next Page TX Register */
-#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
-#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
- * of different NP
- */
-#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
- * 0 = cannot comply with msg
- */
-#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
-#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
- * 0 = sending last NP
- */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
+ * 0 = sending last NP
+ */
/* Link Partner Next Page Register */
-#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
-#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
- * of different NP
- */
-#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
- * 0 = cannot comply with msg
- */
-#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
-#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
-#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
- * 0 = sending last NP
- */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges
+ * of different NP
+ */
+#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg
+ * 0 = cannot comply with msg
+ */
+#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow
+ * 0 = sending last NP
+ */
/* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
- /* 0=DTE device */
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
- /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
- /* 0=Automatic Master/Slave config */
-#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
-#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
-#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
-#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
+ /* 0=DTE device */
+#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
+ /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
+ /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
/* 1000BASE-T Status Register */
-#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
-#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
-#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
-#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
-#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12
#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13
#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
@@ -2829,64 +2652,64 @@ struct e1000_host_command_info {
#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100
/* Extended Status Register */
-#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
-#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
-#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
-#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
-#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
-#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
+#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */
-#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
- /* (0=enable, 1=disable) */
+#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */
+ /* (0=enable, 1=disable) */
/* M88E1000 PHY Specific Control Register */
-#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
-#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
-#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
- * 0=CLK125 toggling
- */
-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
- /* Manual MDI configuration */
-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
-#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
- * 100BASE-TX/10BASE-T:
- * MDI Mode
- */
-#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
- * all speeds.
- */
+#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low,
+ * 0=CLK125 toggling
+ */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
+ /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
+ * 100BASE-TX/10BASE-T:
+ * MDI Mode
+ */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
+ * all speeds.
+ */
#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
- /* 1=Enable Extended 10BASE-T distance
- * (Lower 10BASE-T RX Threshold)
- * 0=Normal 10BASE-T RX Threshold */
+ /* 1=Enable Extended 10BASE-T distance
+ * (Lower 10BASE-T RX Threshold)
+ * 0=Normal 10BASE-T RX Threshold */
#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100
- /* 1=5-Bit interface in 100BASE-TX
- * 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
-#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+ /* 1=5-Bit interface in 100BASE-TX
+ * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1
#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5
#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
/* M88E1000 PHY Specific Status Register */
-#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
-#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
-#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
-#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
-#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
- * 3=110-140M;4=>140M */
-#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
-#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
-#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
-#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
-#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
-#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
-#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
-#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+ * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5
@@ -2894,12 +2717,12 @@ struct e1000_host_command_info {
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
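[Editor's note] A short decoder for the status word described above. The register value is passed in directly (reading it over MDIO is not shown), and M88E1000_PSSR_SPD_DPLX_RESOLVED is checked first since the speed/duplex bits are only meaningful once resolution has completed.

#include <stdint.h>
#include <stdio.h>

#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800
#define M88E1000_PSSR_DPLX              0x2000
#define M88E1000_PSSR_SPEED             0xC000
#define M88E1000_PSSR_100MBS            0x4000
#define M88E1000_PSSR_1000MBS           0x8000

/* Print the speed/duplex encoded in an M88E1000 PHY specific status value. */
static void m88_decode(uint16_t pssr)
{
	if (!(pssr & M88E1000_PSSR_SPD_DPLX_RESOLVED)) {
		printf("speed/duplex not resolved yet\n");
		return;
	}

	switch (pssr & M88E1000_PSSR_SPEED) {
	case M88E1000_PSSR_1000MBS: printf("1000 Mb/s"); break;
	case M88E1000_PSSR_100MBS:  printf("100 Mb/s");  break;
	default:                    printf("10 Mb/s");   break;
	}
	printf(", %s duplex\n", (pssr & M88E1000_PSSR_DPLX) ? "full" : "half");
}

int main(void)
{
	m88_decode(M88E1000_PSSR_1000MBS | M88E1000_PSSR_SPD_DPLX_RESOLVED |
		   M88E1000_PSSR_DPLX);   /* example: 1000 Mb/s, full duplex */
	return 0;
}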
/* M88E1000 Extended PHY Specific Control Register */
-#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
-#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
- * Will assert lost lock and bring
- * link down if idle not seen
- * within 1ms in 1000BASE-T
- */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master */
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
@@ -2914,9 +2737,9 @@ struct e1000_host_command_info {
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300
-#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
-#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */
/* M88EC018 Rev 2 specific DownShift settings */
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
@@ -2938,18 +2761,18 @@ struct e1000_host_command_info {
#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000
/* IGP01E1000 Specific Port Status Register - R/O */
-#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */
#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C
#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200
#define IGP01E1000_PSSR_LINK_UP 0x0400
#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */
#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000
#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000
#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
-#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */
/* IGP01E1000 Specific Port Control Register - R/W */
#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010
@@ -2957,16 +2780,16 @@ struct e1000_host_command_info {
#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400
#define IGP01E1000_PSCR_FLIP_CHIP 0x0800
#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */
/* IGP01E1000 Specific Port Link Health Register */
#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000
#define IGP01E1000_PLHR_MASTER_FAULT 0x2000
#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000
-#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
-#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
-#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */
#define IGP01E1000_PLHR_DATA_ERR_0 0x0100
#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040
#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010
@@ -2981,9 +2804,9 @@ struct e1000_host_command_info {
#define IGP01E1000_MSE_CHANNEL_B 0x0F00
#define IGP01E1000_MSE_CHANNEL_A 0xF000
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */
/* IGP01E1000 DSP reset macros */
#define DSP_RESET_ENABLE 0x0
@@ -2992,8 +2815,8 @@ struct e1000_host_command_info {
/* IGP01E1000 & IGP02E1000 AGC Registers */
-#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
+#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */
/* IGP02E1000 AGC Register Length 9-bit mask */
#define IGP02E1000_AGC_LENGTH_MASK 0x7F
@@ -3011,9 +2834,9 @@ struct e1000_host_command_info {
#define IGP01E1000_PHY_POLARITY_MASK 0x0078
/* IGP01E1000 GMII FIFO Register */
-#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
- * on Link-Up */
-#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
+#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed
+ * on Link-Up */
+#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */
/* IGP01E1000 Analog Register */
#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1
@@ -3032,114 +2855,6 @@ struct e1000_host_command_info {
#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080
#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_DISABLE_JABBER 0x0001 /* 1=Disable Jabber */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Polarity Reversal Disabled */
-#define GG82563_PSCR_POWER_DOWN 0x0004 /* 1=Power Down */
-#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE 0x0008 /* 1=Transmitter Disabled */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI configuration */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX configuration */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Automatic crossover */
-#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE 0x0080 /* 1=Enable Extended Distance */
-#define GG82563_PSCR_ENERGY_DETECT_MASK 0x0300
-#define GG82563_PSCR_ENERGY_DETECT_OFF 0x0000 /* 00,01=Off */
-#define GG82563_PSCR_ENERGY_DETECT_RX 0x0200 /* 10=Sense on Rx only (Energy Detect) */
-#define GG82563_PSCR_ENERGY_DETECT_RX_TM 0x0300 /* 11=Sense and Tx NLP */
-#define GG82563_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force Link Good */
-#define GG82563_PSCR_DOWNSHIFT_ENABLE 0x0800 /* 1=Enable Downshift */
-#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK 0x7000
-#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT 12
-
-/* PHY Specific Status Register (Page 0, Register 17) */
-#define GG82563_PSSR_JABBER 0x0001 /* 1=Jabber */
-#define GG82563_PSSR_POLARITY 0x0002 /* 1=Polarity Reversed */
-#define GG82563_PSSR_LINK 0x0008 /* 1=Link is Up */
-#define GG82563_PSSR_ENERGY_DETECT 0x0010 /* 1=Sleep, 0=Active */
-#define GG82563_PSSR_DOWNSHIFT 0x0020 /* 1=Downshift */
-#define GG82563_PSSR_CROSSOVER_STATUS 0x0040 /* 1=MDIX, 0=MDI */
-#define GG82563_PSSR_RX_PAUSE_ENABLED 0x0100 /* 1=Receive Pause Enabled */
-#define GG82563_PSSR_TX_PAUSE_ENABLED 0x0200 /* 1=Transmit Pause Enabled */
-#define GG82563_PSSR_LINK_UP 0x0400 /* 1=Link Up */
-#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
-#define GG82563_PSSR_PAGE_RECEIVED 0x1000 /* 1=Page Received */
-#define GG82563_PSSR_DUPLEX 0x2000 /* 1-Full-Duplex */
-#define GG82563_PSSR_SPEED_MASK 0xC000
-#define GG82563_PSSR_SPEED_10MBPS 0x0000 /* 00=10Mbps */
-#define GG82563_PSSR_SPEED_100MBPS 0x4000 /* 01=100Mbps */
-#define GG82563_PSSR_SPEED_1000MBPS 0x8000 /* 10=1000Mbps */
-
-/* PHY Specific Status Register 2 (Page 0, Register 19) */
-#define GG82563_PSSR2_JABBER 0x0001 /* 1=Jabber */
-#define GG82563_PSSR2_POLARITY_CHANGED 0x0002 /* 1=Polarity Changed */
-#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
-#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT 0x0020 /* 1=Downshift Detected */
-#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE 0x0040 /* 1=Crossover Changed */
-#define GG82563_PSSR2_FALSE_CARRIER 0x0100 /* 1=False Carrier */
-#define GG82563_PSSR2_SYMBOL_ERROR 0x0200 /* 1=Symbol Error */
-#define GG82563_PSSR2_LINK_STATUS_CHANGED 0x0400 /* 1=Link Status Changed */
-#define GG82563_PSSR2_AUTO_NEG_COMPLETED 0x0800 /* 1=Auto-Neg Completed */
-#define GG82563_PSSR2_PAGE_RECEIVED 0x1000 /* 1=Page Received */
-#define GG82563_PSSR2_DUPLEX_CHANGED 0x2000 /* 1=Duplex Changed */
-#define GG82563_PSSR2_SPEED_CHANGED 0x4000 /* 1=Speed Changed */
-#define GG82563_PSSR2_AUTO_NEG_ERROR 0x8000 /* 1=Auto-Neg Error */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_10BT_POLARITY_FORCE 0x0002 /* 1=Force Negative Polarity */
-#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK 0x000C
-#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL 0x0000 /* 00,01=Normal Operation */
-#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS 0x0008 /* 10=Select 112ns Sequence */
-#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS 0x000C /* 11=Select 16ns Sequence */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Negotiation */
-#define GG82563_PSCR2_1000BT_DISABLE 0x4000 /* 1=Disable 1000BASE-T */
-#define GG82563_PSCR2_TRANSMITER_TYPE_MASK 0x8000
-#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B 0x0000 /* 0=Class B */
-#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A 0x8000 /* 1=Class A */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK 0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ 0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ 0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ 0x0006
-#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
- 1 = 50-80M;
- 2 = 80-110M;
- 3 = 110-140M;
- 4 = >140M */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
-#define GG82563_KMCR_FORCE_LINK_UP 0x0040 /* 1=Force Link Up */
-#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT 0x0080
-#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK 0x0400
-#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT 0x0400 /* 1=6.25MHz, 0=0.8MHz */
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 /* 1=Enalbe SERDES Electrical Idle */
-#define GG82563_PMCR_DISABLE_PORT 0x0002 /* 1=Disable Port */
-#define GG82563_PMCR_DISABLE_SERDES 0x0004 /* 1=Disable SERDES */
-#define GG82563_PMCR_REVERSE_AUTO_NEG 0x0008 /* 1=Enable Reverse Auto-Negotiation */
-#define GG82563_PMCR_DISABLE_1000_NON_D0 0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
-#define GG82563_PMCR_DISABLE_1000 0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
-#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A 0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
-#define GG82563_PMCR_FORCE_POWER_STATE 0x0080 /* 1=Force Power State */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK 0x0300
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR 0x0000 /* 00=Dr */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U 0x0100 /* 01=D0u */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A 0x0200 /* 10=D0a */
-#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3 0x0300 /* 11=D3 */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding Use */
-
-
/* Bit definitions for valid PHY IDs. */
/* I = Integrated
* E = External
@@ -3154,8 +2869,6 @@ struct e1000_host_command_info {
#define M88E1011_I_REV_4 0x04
#define M88E1111_I_PHY_ID 0x01410CC0
#define L1LXT971A_PHY_ID 0x001378E0
-#define GG82563_E_PHY_ID 0x01410CA0
-
/* Bits...
* 15-5: page
@@ -3166,41 +2879,41 @@ struct e1000_host_command_info {
(((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
#define IGP3_PHY_PORT_CTRL \
- PHY_REG(769, 17) /* Port General Configuration */
+ PHY_REG(769, 17) /* Port General Configuration */
#define IGP3_PHY_RATE_ADAPT_CTRL \
- PHY_REG(769, 25) /* Rate Adapter Control Register */
+ PHY_REG(769, 25) /* Rate Adapter Control Register */
#define IGP3_KMRN_FIFO_CTRL_STATS \
- PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+ PHY_REG(770, 16) /* KMRN FIFO's control/status register */
#define IGP3_KMRN_POWER_MNG_CTRL \
- PHY_REG(770, 17) /* KMRN Power Management Control Register */
+ PHY_REG(770, 17) /* KMRN Power Management Control Register */
#define IGP3_KMRN_INBAND_CTRL \
- PHY_REG(770, 18) /* KMRN Inband Control Register */
+ PHY_REG(770, 18) /* KMRN Inband Control Register */
#define IGP3_KMRN_DIAG \
- PHY_REG(770, 19) /* KMRN Diagnostic register */
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+ PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
#define IGP3_KMRN_ACK_TIMEOUT \
- PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+ PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
#define IGP3_VR_CTRL \
- PHY_REG(776, 18) /* Voltage regulator control register */
-#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
-#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
+ PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
#define IGP3_CAPABILITY \
- PHY_REG(776, 19) /* IGP3 Capability Register */
+ PHY_REG(776, 19) /* IGP3 Capability Register */
/* Capabilities for SKU Control */
-#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
-#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
-#define IGP3_CAP_ASF 0x0004 /* Support ASF */
-#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
-#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
-#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
-#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
-#define IGP3_CAP_RSS 0x0080 /* Support RSS */
-#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
-#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
+#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */
#define IGP3_PPC_JORDAN_EN 0x0001
#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002
@@ -3210,69 +2923,69 @@ struct e1000_host_command_info {
#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020
#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040
-#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
-#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
+#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */
#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18)
#define IGP3_KMRN_EC_DIS_INBAND 0x0080
#define IGP03E1000_E_PHY_ID 0x02A80390
-#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
+#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */
#define IFE_PLUS_E_PHY_ID 0x02A80320
#define IFE_C_E_PHY_ID 0x02A80310
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
-#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
-#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnet Counter */
-#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
-#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
-#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
-#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
-#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
-#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
-#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
-
-#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Defaut 1 = Disable auto reduced power down */
-#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
-#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
-#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
-#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
-#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
-#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbps, 0=10Mbps */
+#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */
#define IFE_PESC_POLARITY_REVERSED_SHIFT 8
-#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dyanmic Power Down disabled */
-#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
-#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */
#define IFE_PSC_FORCE_POLARITY_SHIFT 5
#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */
#define IFE_PMC_MDIX_MODE_SHIFT 6
-#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
-
-#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
-#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */
-#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
-#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
-#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
-#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */
-#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
-#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
-#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-
-#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
-#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
-#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1=Test Passed, 0=Failed */
+#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask indicating the type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
#define ICH_FLASH_SEG_SIZE_256 256
#define ICH_FLASH_SEG_SIZE_4K 4096
#define ICH_FLASH_SEG_SIZE_64K 65536
@@ -3305,74 +3018,6 @@ struct e1000_host_command_info {
#define ICH_GFPREG_BASE_MASK 0x1FFF
#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
-/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
-/* Offset 04h HSFSTS */
-union ich8_hws_flash_status {
- struct ich8_hsfsts {
-#ifdef __BIG_ENDIAN
- u16 reserved2 :6;
- u16 fldesvalid :1;
- u16 flockdn :1;
- u16 flcdone :1;
- u16 flcerr :1;
- u16 dael :1;
- u16 berasesz :2;
- u16 flcinprog :1;
- u16 reserved1 :2;
-#else
- u16 flcdone :1; /* bit 0 Flash Cycle Done */
- u16 flcerr :1; /* bit 1 Flash Cycle Error */
- u16 dael :1; /* bit 2 Direct Access error Log */
- u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
- u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
- u16 reserved1 :2; /* bit 13:6 Reserved */
- u16 reserved2 :6; /* bit 13:6 Reserved */
- u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
- u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
-#endif
- } hsf_status;
- u16 regval;
-};
-
-/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
-/* Offset 06h FLCTL */
-union ich8_hws_flash_ctrl {
- struct ich8_hsflctl {
-#ifdef __BIG_ENDIAN
- u16 fldbcount :2;
- u16 flockdn :6;
- u16 flcgo :1;
- u16 flcycle :2;
- u16 reserved :5;
-#else
- u16 flcgo :1; /* 0 Flash Cycle Go */
- u16 flcycle :2; /* 2:1 Flash Cycle */
- u16 reserved :5; /* 7:3 Reserved */
- u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
- u16 flockdn :6; /* 15:10 Reserved */
-#endif
- } hsf_ctrl;
- u16 regval;
-};
-
-/* ICH8 Flash Region Access Permissions */
-union ich8_hws_flash_regacc {
- struct ich8_flracc {
-#ifdef __BIG_ENDIAN
- u32 gmwag :8;
- u32 gmrag :8;
- u32 grwa :8;
- u32 grra :8;
-#else
- u32 grra :8; /* 0:7 GbE region Read Access */
- u32 grwa :8; /* 8:15 GbE region Write Access */
- u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
- u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
-#endif
- } hsf_flregacc;
- u16 regval;
-};
-
/* Miscellaneous PHY bit definitions. */
#define PHY_PREAMBLE 0xFFFFFFFF
#define PHY_SOF 0x01
@@ -3384,10 +3029,10 @@ union ich8_hws_flash_regacc {
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
#define E1000_PHY_ADDRESS 0x01
-#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
-#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
#define PHY_REVISION_MASK 0xFFFFFFF0
-#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
+#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */
#define REG4_SPEED_MASK 0x01E0
#define REG9_SPEED_MASK 0x0300
#define ADVERTISE_10_HALF 0x0001
@@ -3396,8 +3041,8 @@ union ich8_hws_flash_regacc {
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010
#define ADVERTISE_1000_FULL 0x0020
-#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
-#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds*/
-#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds*/
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c66dd4f9437..bcd192ca47b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k3-NAPI"
+#define DRV_VERSION "7.3.21-k5-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -131,7 +131,6 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
-static irqreturn_t e1000_intr_msi(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
@@ -258,25 +257,14 @@ module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
irq_handler_t handler = e1000_intr;
int irq_flags = IRQF_SHARED;
int err;
- if (hw->mac_type >= e1000_82571) {
- adapter->have_msi = !pci_enable_msi(adapter->pdev);
- if (adapter->have_msi) {
- handler = e1000_intr_msi;
- irq_flags = 0;
- }
- }
-
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
DPRINTK(PROBE, ERR,
"Unable to allocate interrupt Error: %d\n", err);
}
@@ -289,9 +277,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
}
/**
@@ -345,76 +330,6 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
}
}
-/**
- * e1000_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
- *
- * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the network i/f is closed.
- *
- **/
-
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
-{
- u32 ctrl_ext;
- u32 swsm;
- struct e1000_hw *hw = &adapter->hw;
-
- /* Let firmware taken over control of h/w */
- switch (hw->mac_type) {
- case e1000_82573:
- swsm = er32(SWSM);
- ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
- break;
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- ctrl_ext = er32(CTRL_EXT);
- ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
- break;
- default:
- break;
- }
-}
-
-/**
- * e1000_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
- *
- * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the network i/f is open.
- *
- **/
-
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
-{
- u32 ctrl_ext;
- u32 swsm;
- struct e1000_hw *hw = &adapter->hw;
-
- /* Let firmware know the driver has taken over */
- switch (hw->mac_type) {
- case e1000_82573:
- swsm = er32(SWSM);
- ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
- break;
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- ctrl_ext = er32(CTRL_EXT);
- ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
- break;
- default:
- break;
- }
-}
-
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -425,20 +340,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
- /* enable receiving management packets to the host */
- /* this will probably generate destination unreachable messages
- * from the host OS, but the packets will be handled on SMBUS */
- if (hw->has_manc2h) {
- u32 manc2h = er32(MANC2H);
-
- manc |= E1000_MANC_EN_MNG2HOST;
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
- ew32(MANC2H, manc2h);
- }
-
ew32(MANC, manc);
}
}
@@ -453,12 +354,6 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
- if (hw->has_manc2h)
- manc &= ~E1000_MANC_EN_MNG2HOST;
-
- /* don't explicitly have to mess with MANC2H since
- * MANC has an enable disable that gates MANC2H */
-
ew32(MANC, manc);
}
}
@@ -563,15 +458,6 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
if (er32(MANC) & E1000_MANC_SMBUS_EN)
goto out;
break;
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_80003es2lan:
- case e1000_ich8lan:
- if (e1000_check_mng_mode(hw) ||
- e1000_check_phy_reset_block(hw))
- goto out;
- break;
default:
goto out;
}
@@ -599,8 +485,7 @@ void e1000_down(struct e1000_adapter *adapter)
ew32(RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
- /* can be netif_tx_disable when NETIF_F_LLTX is removed */
- netif_stop_queue(netdev);
+ netif_tx_disable(netdev);
/* disable transmits in the hardware */
tctl = er32(TCTL);
@@ -671,16 +556,6 @@ void e1000_reset(struct e1000_adapter *adapter)
legacy_pba_adjust = true;
pba = E1000_PBA_30K;
break;
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
- pba = E1000_PBA_38K;
- break;
- case e1000_82573:
- pba = E1000_PBA_20K;
- break;
- case e1000_ich8lan:
- pba = E1000_PBA_8K;
case e1000_undefined:
case e1000_num_macs:
break;
@@ -744,16 +619,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* if short on rx space, rx wins and must trump tx
* adjustment or use Early Receive if available */
- if (pba < min_rx_space) {
- switch (hw->mac_type) {
- case e1000_82573:
- /* ERT enabled in e1000_configure_rx */
- break;
- default:
- pba = min_rx_space;
- break;
- }
- }
+ if (pba < min_rx_space)
+ pba = min_rx_space;
}
}
@@ -789,7 +656,6 @@ void e1000_reset(struct e1000_adapter *adapter)
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
if (hw->mac_type >= e1000_82544 &&
- hw->mac_type <= e1000_82547_rev_2 &&
hw->autoneg == 1 &&
hw->autoneg_advertised == ADVERTISE_1000_FULL) {
u32 ctrl = er32(CTRL);
@@ -806,20 +672,6 @@ void e1000_reset(struct e1000_adapter *adapter)
e1000_reset_adaptive(hw);
e1000_phy_get_info(hw, &adapter->phy_info);
- if (!adapter->smart_power_down &&
- (hw->mac_type == e1000_82571 ||
- hw->mac_type == e1000_82572)) {
- u16 phy_data = 0;
- /* speed up time to link by disabling smart power down, ignore
- * the return value of this function because there is nothing
- * different we would do if it failed */
- e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- &phy_data);
- phy_data &= ~IGP02E1000_PM_SPD;
- e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- phy_data);
- }
-
e1000_release_manageability(adapter);
}
@@ -1046,17 +898,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_sw_init;
err = -EIO;
- /* Flash BAR mapping must happen after e1000_sw_init
- * because it depends on mac_type */
- if ((hw->mac_type == e1000_ich8lan) &&
- (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- hw->flash_address = pci_ioremap_bar(pdev, 1);
- if (!hw->flash_address)
- goto err_flashmap;
- }
-
- if (e1000_check_phy_reset_block(hw))
- DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
if (hw->mac_type >= e1000_82543) {
netdev->features = NETIF_F_SG |
@@ -1064,21 +905,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- if (hw->mac_type == e1000_ich8lan)
- netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
}
if ((hw->mac_type >= e1000_82544) &&
(hw->mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
- if (hw->mac_type > e1000_82547_rev_2)
- netdev->features |= NETIF_F_TSO6;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_SG;
@@ -1153,15 +989,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
eeprom_apme_mask = E1000_EEPROM_82544_APM;
break;
- case e1000_ich8lan:
- e1000_read_eeprom(hw,
- EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
- eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
- break;
case e1000_82546:
case e1000_82546_rev_3:
- case e1000_82571:
- case e1000_80003es2lan:
if (er32(STATUS) & E1000_STATUS_FUNC_1){
e1000_read_eeprom(hw,
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -1185,17 +1014,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
break;
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
- case E1000_DEV_ID_82571EB_FIBER:
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- case E1000_DEV_ID_82571EB_QUAD_COPPER:
- case E1000_DEV_ID_82571EB_QUAD_FIBER:
- case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
- case E1000_DEV_ID_82571PT_QUAD_COPPER:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
@@ -1213,39 +1037,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* print bus type/speed/width info */
DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
- ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
- (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
- ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
- (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+ ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
+ ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
(hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
- ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
- (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
- (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
- "32-bit"));
+ ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
printk("%pM\n", netdev->dev_addr);
- if (hw->bus_type == e1000_bus_type_pci_express) {
- DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
- "longer be supported by this driver in the future.\n",
- pdev->vendor, pdev->device);
- DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
- "driver instead.\n");
- }
-
/* reset the hardware with the new settings */
e1000_reset(adapter);
- /* If the controller is 82573 and f/w is AMT, do not set
- * DRV_LOAD until the interface is up. For all other cases,
- * let the f/w know that the h/w is now under the control
- * of the driver. */
- if (hw->mac_type != e1000_82573 ||
- !e1000_check_mng_mode(hw))
- e1000_get_hw_control(adapter);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
@@ -1260,14 +1063,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return 0;
err_register:
- e1000_release_hw_control(adapter);
err_eeprom:
- if (!e1000_check_phy_reset_block(hw))
- e1000_phy_hw_reset(hw);
+ e1000_phy_hw_reset(hw);
if (hw->flash_address)
iounmap(hw->flash_address);
-err_flashmap:
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
@@ -1298,18 +1098,18 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ set_bit(__E1000_DOWN, &adapter->flags);
+ del_timer_sync(&adapter->tx_fifo_stall_timer);
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_info_timer);
+
cancel_work_sync(&adapter->reset_task);
e1000_release_manageability(adapter);
- /* Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant. */
- e1000_release_hw_control(adapter);
-
unregister_netdev(netdev);
- if (!e1000_check_phy_reset_block(hw))
- e1000_phy_hw_reset(hw);
+ e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
@@ -1472,12 +1272,6 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
}
- /* If AMT is enabled, let the firmware know that the network
- * interface is now open */
- if (hw->mac_type == e1000_82573 &&
- e1000_check_mng_mode(hw))
- e1000_get_hw_control(adapter);
-
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
@@ -1503,7 +1297,6 @@ static int e1000_open(struct net_device *netdev)
return E1000_SUCCESS;
err_req_irq:
- e1000_release_hw_control(adapter);
e1000_power_down_phy(adapter);
e1000_free_all_rx_resources(adapter);
err_setup_rx:
@@ -1548,12 +1341,6 @@ static int e1000_close(struct net_device *netdev)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
- /* If AMT is enabled, let the firmware know that the network
- * interface is now closed */
- if (hw->mac_type == e1000_82573 &&
- e1000_check_mng_mode(hw))
- e1000_release_hw_control(adapter);
-
return 0;
}
@@ -1692,7 +1479,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
struct e1000_hw *hw = &adapter->hw;
- u32 tdlen, tctl, tipg, tarc;
+ u32 tdlen, tctl, tipg;
u32 ipgr1, ipgr2;
/* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1714,8 +1501,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
}
/* Set the default values for the Tx Inter Packet Gap timer */
- if (hw->mac_type <= e1000_82547_rev_2 &&
- (hw->media_type == e1000_media_type_fiber ||
+ if ((hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes))
tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
else
@@ -1728,10 +1514,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ipgr1 = DEFAULT_82542_TIPG_IPGR1;
ipgr2 = DEFAULT_82542_TIPG_IPGR2;
break;
- case e1000_80003es2lan:
- ipgr1 = DEFAULT_82543_TIPG_IPGR1;
- ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
- break;
default:
ipgr1 = DEFAULT_82543_TIPG_IPGR1;
ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1754,21 +1536,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
- if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
- tarc = er32(TARC0);
- /* set the speed mode bit, we'll clear it if we're not at
- * gigabit link later */
- tarc |= (1 << 21);
- ew32(TARC0, tarc);
- } else if (hw->mac_type == e1000_80003es2lan) {
- tarc = er32(TARC0);
- tarc |= 1;
- ew32(TARC0, tarc);
- tarc = er32(TARC1);
- tarc |= 1;
- ew32(TARC1, tarc);
- }
-
e1000_config_collision_dist(hw);
/* Setup Transmit Descriptor Settings for eop descriptor */
@@ -1804,7 +1571,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
@@ -1817,10 +1583,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
}
memset(rxdr->buffer_info, 0, size);
- if (hw->mac_type <= e1000_82547_rev_2)
- desc_len = sizeof(struct e1000_rx_desc);
- else
- desc_len = sizeof(union e1000_rx_desc_packet_split);
+ desc_len = sizeof(struct e1000_rx_desc);
/* Round up to nearest 4K */
@@ -1977,7 +1740,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
struct e1000_hw *hw = &adapter->hw;
- u32 rdlen, rctl, rxcsum, ctrl_ext;
+ u32 rdlen, rctl, rxcsum;
if (adapter->netdev->mtu > ETH_DATA_LEN) {
rdlen = adapter->rx_ring[0].count *
@@ -2004,17 +1767,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(ITR, 1000000000 / (adapter->itr * 256));
}
- if (hw->mac_type >= e1000_82571) {
- ctrl_ext = er32(CTRL_EXT);
- /* Reset delay timers after every interrupt */
- ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
- /* Auto-Mask interrupts upon ICR access */
- ctrl_ext |= E1000_CTRL_EXT_IAME;
- ew32(IAM, 0xffffffff);
- ew32(CTRL_EXT, ctrl_ext);
- E1000_WRITE_FLUSH();
- }
-
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
switch (adapter->num_rx_queues) {
@@ -2329,22 +2081,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
e1000_rar_set(hw, hw->mac_addr, 0);
- /* With 82571 controllers, LAA may be overwritten (with the default)
- * due to controller reset from the other port. */
- if (hw->mac_type == e1000_82571) {
- /* activate the work around */
- hw->laa_is_present = 1;
-
- /* Hold a copy of the LAA in RAR[14] This is done so that
- * between the time RAR[0] gets clobbered and the time it
- * gets fixed (in e1000_watchdog), the actual LAA is in one
- * of the RARs and no incoming packets directed to this port
- * are dropped. Eventaully the LAA will be in RAR[0] and
- * RAR[14] */
- e1000_rar_set(hw, hw->mac_addr,
- E1000_RAR_ENTRIES - 1);
- }
-
if (hw->mac_type == e1000_82542_rev2_0)
e1000_leave_82542_rst(adapter);
@@ -2371,9 +2107,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
u32 rctl;
u32 hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
- int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
- E1000_NUM_MTA_REGISTERS_ICH8LAN :
- E1000_NUM_MTA_REGISTERS;
+ int mta_reg_count = E1000_NUM_MTA_REGISTERS;
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
if (!mcarray) {
@@ -2381,13 +2115,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
return;
}
- if (hw->mac_type == e1000_ich8lan)
- rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
-
- /* reserve RAR[14] for LAA over-write work-around */
- if (hw->mac_type == e1000_82571)
- rar_entries--;
-
/* Check for Promiscuous and All Multicast modes */
rctl = er32(RCTL);
@@ -2396,15 +2123,13 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
rctl &= ~E1000_RCTL_VFE;
} else {
- if (netdev->flags & IFF_ALLMULTI) {
+ if (netdev->flags & IFF_ALLMULTI)
rctl |= E1000_RCTL_MPE;
- } else {
+ else
rctl &= ~E1000_RCTL_MPE;
- }
- if (adapter->hw.mac_type != e1000_ich8lan)
- /* Enable VLAN filter if there is a VLAN */
- if (adapter->vlgrp)
- rctl |= E1000_RCTL_VFE;
+ /* Enable VLAN filter if there is a VLAN */
+ if (adapter->vlgrp)
+ rctl |= E1000_RCTL_VFE;
}
if (netdev->uc.count > rar_entries - 1) {
@@ -2427,7 +2152,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
*
* RAR 0 is used for the station MAC address
* if there are not 14 addresses, go ahead and clear the filters
- * -- with 82571 controllers only 0-13 entries are filled here
*/
i = 1;
if (use_uc)
@@ -2521,12 +2245,46 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
adapter->tx_fifo_head = 0;
atomic_set(&adapter->tx_fifo_stall, 0);
netif_wake_queue(netdev);
- } else {
+ } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
}
}
}
+static bool e1000_has_link(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = false;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+ * true until e1000_check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->media_type) {
+ case e1000_media_type_copper:
+ if (hw->get_link_status) {
+ e1000_check_for_link(hw);
+ link_active = !hw->get_link_status;
+ } else {
+ link_active = true;
+ }
+ break;
+ case e1000_media_type_fiber:
+ e1000_check_for_link(hw);
+ link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+ break;
+ case e1000_media_type_internal_serdes:
+ e1000_check_for_link(hw);
+ link_active = hw->serdes_has_link;
+ break;
+ default:
+ break;
+ }
+
+ return link_active;
+}
+
/**
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -2538,33 +2296,16 @@ static void e1000_watchdog(unsigned long data)
struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = adapter->tx_ring;
u32 link, tctl;
- s32 ret_val;
-
- ret_val = e1000_check_for_link(hw);
- if ((ret_val == E1000_ERR_PHY) &&
- (hw->phy_type == e1000_phy_igp_3) &&
- (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
- /* See e1000_kumeran_lock_loss_workaround() */
- DPRINTK(LINK, INFO,
- "Gigabit has been disabled, downgrading speed\n");
- }
- if (hw->mac_type == e1000_82573) {
- e1000_enable_tx_pkt_filtering(hw);
- if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
- e1000_update_mng_vlan(adapter);
- }
-
- if ((hw->media_type == e1000_media_type_internal_serdes) &&
- !(er32(TXCW) & E1000_TXCW_ANE))
- link = !hw->serdes_link_down;
- else
- link = er32(STATUS) & E1000_STATUS_LU;
+ link = e1000_has_link(adapter);
+ if ((netif_carrier_ok(netdev)) && link)
+ goto link_up;
if (link) {
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
bool txb2b = true;
+ /* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -2589,7 +2330,7 @@ static void e1000_watchdog(unsigned long data)
case SPEED_10:
txb2b = false;
netdev->tx_queue_len = 10;
- adapter->tx_timeout_factor = 8;
+ adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = false;
@@ -2598,52 +2339,16 @@ static void e1000_watchdog(unsigned long data)
break;
}
- if ((hw->mac_type == e1000_82571 ||
- hw->mac_type == e1000_82572) &&
- !txb2b) {
- u32 tarc0;
- tarc0 = er32(TARC0);
- tarc0 &= ~(1 << 21);
- ew32(TARC0, tarc0);
- }
-
- /* disable TSO for pcie and 10/100 speeds, to avoid
- * some hardware issues */
- if (!adapter->tso_force &&
- hw->bus_type == e1000_bus_type_pci_express){
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- DPRINTK(PROBE,INFO,
- "10/100 speed: disabling TSO\n");
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- break;
- case SPEED_1000:
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- break;
- default:
- /* oops */
- break;
- }
- }
-
- /* enable transmits in the hardware, need to do this
- * after setting TARC0 */
+ /* enable transmits in the hardware */
tctl = er32(TCTL);
tctl |= E1000_TCTL_EN;
ew32(TCTL, tctl);
netif_carrier_on(netdev);
- mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
adapter->smartspeed = 0;
- } else {
- /* make sure the receive unit is started */
- if (hw->rx_needs_kicking) {
- u32 rctl = er32(RCTL);
- ew32(RCTL, rctl | E1000_RCTL_EN);
- }
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -2652,21 +2357,16 @@ static void e1000_watchdog(unsigned long data)
printk(KERN_INFO "e1000: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
- mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
-
- /* 80003ES2LAN workaround--
- * For packet buffer work-around on link down event;
- * disable receives in the ISR and
- * reset device here in the watchdog
- */
- if (hw->mac_type == e1000_80003es2lan)
- /* reset device */
- schedule_work(&adapter->reset_task);
+
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
e1000_smartspeed(adapter);
}
+link_up:
e1000_update_stats(adapter);
hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
@@ -2700,13 +2400,10 @@ static void e1000_watchdog(unsigned long data)
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = true;
- /* With 82571 controllers, LAA may be overwritten due to controller
- * reset from the other port. Set the appropriate LAA in RAR[0] */
- if (hw->mac_type == e1000_82571 && hw->laa_is_present)
- e1000_rar_set(hw, hw->mac_addr, 0);
-
/* Reset the timer */
- mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
}
enum latency_range {
@@ -2718,6 +2415,11 @@ enum latency_range {
/**
* e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
* computation is faster updates and more accurate ITR for the current
@@ -2727,10 +2429,6 @@ enum latency_range {
* while increasing bulk throughput.
* this functionality is controlled by the InterruptThrottleRate module
* parameter (see e1000_param.c)
- * @adapter: pointer to adapter
- * @itr_setting: current adapter->itr
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
**/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
u16 itr_setting, int packets, int bytes)
@@ -3035,8 +2733,9 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size -= 4;
buffer_info->length = size;
- buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+ /* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
+ buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
buffer_info->next_to_watch = i;
len -= size;
@@ -3071,13 +2770,14 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
* Avoid terminating buffers within evenly-aligned
* dwords. */
if (unlikely(adapter->pcix_82544 &&
- !((unsigned long)(frag->page+offset+size-1) & 4) &&
- size > 4))
+ !((unsigned long)(page_to_phys(frag->page) + offset
+ + size - 1) & 4) &&
+ size > 4))
size -= 4;
buffer_info->length = size;
- buffer_info->dma = map[f] + offset;
buffer_info->time_stamp = jiffies;
+ buffer_info->dma = map[f] + offset;
buffer_info->next_to_watch = i;
len -= size;
@@ -3186,41 +2886,6 @@ no_fifo_stall_required:
return 0;
}
-#define MINIMUM_DHCP_PACKET_SIZE 282
-static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
- struct sk_buff *skb)
-{
- struct e1000_hw *hw = &adapter->hw;
- u16 length, offset;
- if (vlan_tx_tag_present(skb)) {
- if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
- ( hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
- return 0;
- }
- if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
- struct ethhdr *eth = (struct ethhdr *)skb->data;
- if ((htons(ETH_P_IP) == eth->h_proto)) {
- const struct iphdr *ip =
- (struct iphdr *)((u8 *)skb->data+14);
- if (IPPROTO_UDP == ip->protocol) {
- struct udphdr *udp =
- (struct udphdr *)((u8 *)ip +
- (ip->ihl << 2));
- if (ntohs(udp->dest) == 67) {
- offset = (u8 *)udp + 8 - skb->data;
- length = skb->len - offset;
-
- return e1000_mng_write_dhcp_info(hw,
- (u8 *)udp + 8,
- length);
- }
- }
- }
- }
- return 0;
-}
-
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3279,11 +2944,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- /* 82571 and newer doesn't need the workaround that limited descriptor
- * length to 4kB */
- if (hw->mac_type >= e1000_82571)
- max_per_txd = 8192;
-
mss = skb_shinfo(skb)->gso_size;
/* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
@@ -3296,9 +2956,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
- * points to just header, pull a few bytes of payload from
- * frags into skb->data */
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
@@ -3313,10 +2970,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
break;
/* fall through */
- case e1000_82571:
- case e1000_82572:
- case e1000_82573:
- case e1000_ich8lan:
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
DPRINTK(DRV, ERR,
@@ -3361,11 +3014,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (adapter->pcix_82544)
count += nr_frags;
-
- if (hw->tx_pkt_filtering &&
- (hw->mac_type == e1000_82573))
- e1000_transfer_dhcp_info(adapter, skb);
-
/* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
@@ -3374,7 +3022,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (unlikely(hw->mac_type == e1000_82547)) {
if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
netif_stop_queue(netdev);
- mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->tx_fifo_stall_timer,
+ jiffies + 1);
return NETDEV_TX_BUSY;
}
}
@@ -3393,14 +3043,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
if (likely(tso)) {
- tx_ring->last_tx_tso = 1;
+ if (likely(hw->mac_type != e1000_82544))
+ tx_ring->last_tx_tso = 1;
tx_flags |= E1000_TX_FLAGS_TSO;
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
- /* Old method was to assume IPv4 packet by default if TSO was enabled.
- * 82571 hardware supports TSO capabilities for IPv6 as well...
- * no longer assume, we must. */
if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
@@ -3472,7 +3120,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- u16 eeprom_data = 0;
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3483,44 +3130,23 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Adapter-specific max frame size limits. */
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
- case e1000_ich8lan:
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
}
break;
- case e1000_82573:
- /* Jumbo Frames not supported if:
- * - this is not an 82573L device
- * - ASPM is enabled in any way (0x1A bits 3:2) */
- e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
- &eeprom_data);
- if ((hw->device_id != E1000_DEV_ID_82573L) ||
- (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
- if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
- DPRINTK(PROBE, ERR,
- "Jumbo Frames not supported.\n");
- return -EINVAL;
- }
- break;
- }
- /* ERT will be enabled later to enable wire speed receives */
-
- /* fall through to get support */
- case e1000_82571:
- case e1000_82572:
- case e1000_80003es2lan:
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
- if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
- return -EINVAL;
- }
- break;
default:
/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
break;
}
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ /* e1000_down has a dependency on max_frame_size */
+ hw->max_frame_size = max_frame;
+ if (netif_running(netdev))
+ e1000_down(adapter);
+
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
@@ -3549,11 +3175,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n",
+ netdev->name, netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
- hw->max_frame_size = max_frame;
if (netif_running(netdev))
- e1000_reinit_locked(adapter);
+ e1000_up(adapter);
+ else
+ e1000_reset(adapter);
+
+ clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
}
@@ -3596,14 +3227,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.mprc += er32(MPRC);
adapter->stats.roc += er32(ROC);
- if (hw->mac_type != e1000_ich8lan) {
- adapter->stats.prc64 += er32(PRC64);
- adapter->stats.prc127 += er32(PRC127);
- adapter->stats.prc255 += er32(PRC255);
- adapter->stats.prc511 += er32(PRC511);
- adapter->stats.prc1023 += er32(PRC1023);
- adapter->stats.prc1522 += er32(PRC1522);
- }
+ adapter->stats.prc64 += er32(PRC64);
+ adapter->stats.prc127 += er32(PRC127);
+ adapter->stats.prc255 += er32(PRC255);
+ adapter->stats.prc511 += er32(PRC511);
+ adapter->stats.prc1023 += er32(PRC1023);
+ adapter->stats.prc1522 += er32(PRC1522);
adapter->stats.symerrs += er32(SYMERRS);
adapter->stats.mpc += er32(MPC);
@@ -3632,14 +3261,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.toth += er32(TOTH);
adapter->stats.tpr += er32(TPR);
- if (hw->mac_type != e1000_ich8lan) {
- adapter->stats.ptc64 += er32(PTC64);
- adapter->stats.ptc127 += er32(PTC127);
- adapter->stats.ptc255 += er32(PTC255);
- adapter->stats.ptc511 += er32(PTC511);
- adapter->stats.ptc1023 += er32(PTC1023);
- adapter->stats.ptc1522 += er32(PTC1522);
- }
+ adapter->stats.ptc64 += er32(PTC64);
+ adapter->stats.ptc127 += er32(PTC127);
+ adapter->stats.ptc255 += er32(PTC255);
+ adapter->stats.ptc511 += er32(PTC511);
+ adapter->stats.ptc1023 += er32(PTC1023);
+ adapter->stats.ptc1522 += er32(PTC1522);
adapter->stats.mptc += er32(MPTC);
adapter->stats.bptc += er32(BPTC);
@@ -3659,20 +3286,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.tsctc += er32(TSCTC);
adapter->stats.tsctfc += er32(TSCTFC);
}
- if (hw->mac_type > e1000_82547_rev_2) {
- adapter->stats.iac += er32(IAC);
- adapter->stats.icrxoc += er32(ICRXOC);
-
- if (hw->mac_type != e1000_ich8lan) {
- adapter->stats.icrxptc += er32(ICRXPTC);
- adapter->stats.icrxatc += er32(ICRXATC);
- adapter->stats.ictxptc += er32(ICTXPTC);
- adapter->stats.ictxatc += er32(ICTXATC);
- adapter->stats.ictxqec += er32(ICTXQEC);
- adapter->stats.ictxqmtc += er32(ICTXQMTC);
- adapter->stats.icrxdmtc += er32(ICRXDMTC);
- }
- }
/* Fill out the OS statistics structure */
adapter->net_stats.multicast = adapter->stats.mprc;
@@ -3731,49 +3344,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
}
/**
- * e1000_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-
-static irqreturn_t e1000_intr_msi(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 icr = er32(ICR);
-
- /* in NAPI mode read ICR disables interrupts using IAM */
-
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
- hw->get_link_status = 1;
- /* 80003ES2LAN workaround-- For packet buffer work-around on
- * link down event; disable receives here in the ISR and reset
- * adapter in watchdog */
- if (netif_carrier_ok(netdev) &&
- (hw->mac_type == e1000_80003es2lan)) {
- /* disable receives */
- u32 rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- }
- /* guard against interrupt when we're going down */
- if (!test_bit(__E1000_DOWN, &adapter->flags))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
- }
-
- if (likely(napi_schedule_prep(&adapter->napi))) {
- adapter->total_tx_bytes = 0;
- adapter->total_tx_packets = 0;
- adapter->total_rx_bytes = 0;
- adapter->total_rx_packets = 0;
- __napi_schedule(&adapter->napi);
- } else
- e1000_irq_enable(adapter);
-
- return IRQ_HANDLED;
-}
-
-/**
* e1000_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
@@ -3784,43 +3354,22 @@ static irqreturn_t e1000_intr(int irq, void *data)
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 rctl, icr = er32(ICR);
+ u32 icr = er32(ICR);
if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
return IRQ_NONE; /* Not our interrupt */
- /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
- * not set, then the adapter didn't send an interrupt */
- if (unlikely(hw->mac_type >= e1000_82571 &&
- !(icr & E1000_ICR_INT_ASSERTED)))
- return IRQ_NONE;
-
- /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
- * need for the IMC write */
-
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
- /* 80003ES2LAN workaround--
- * For packet buffer work-around on link down event;
- * disable receives here in the ISR and
- * reset adapter in watchdog
- */
- if (netif_carrier_ok(netdev) &&
- (hw->mac_type == e1000_80003es2lan)) {
- /* disable receives */
- rctl = er32(RCTL);
- ew32(RCTL, rctl & ~E1000_RCTL_EN);
- }
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->flags))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (unlikely(hw->mac_type < e1000_82571)) {
- /* disable interrupts, without the synchronize_irq bit */
- ew32(IMC, ~0);
- E1000_WRITE_FLUSH();
- }
+ /* disable interrupts, without the synchronize_irq bit */
+ ew32(IMC, ~0);
+ E1000_WRITE_FLUSH();
+
if (likely(napi_schedule_prep(&adapter->napi))) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -3844,17 +3393,13 @@ static irqreturn_t e1000_intr(int irq, void *data)
static int e1000_clean(struct napi_struct *napi, int budget)
{
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
- struct net_device *poll_dev = adapter->netdev;
- int tx_cleaned = 0, work_done = 0;
-
- adapter = netdev_priv(poll_dev);
+ int tx_clean_complete = 0, work_done = 0;
- tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
+ tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
- adapter->clean_rx(adapter, &adapter->rx_ring[0],
- &work_done, budget);
+ adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
- if (!tx_cleaned)
+ if (!tx_clean_complete)
work_done = budget;
/* If budget not fully consumed, exit the polling mode */
@@ -3925,7 +3470,9 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* sees the new next_to_clean.
*/
smp_mb();
- if (netif_queue_stopped(netdev)) {
+
+ if (netif_queue_stopped(netdev) &&
+ !(test_bit(__E1000_DOWN, &adapter->flags))) {
netif_wake_queue(netdev);
++adapter->restart_queue;
}
@@ -3935,8 +3482,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[i].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
+ if (tx_ring->buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
(adapter->tx_timeout_factor * HZ))
&& !(er32(STATUS) & E1000_STATUS_TXOFF)) {
@@ -3958,7 +3505,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
tx_ring->next_to_clean,
- tx_ring->buffer_info[i].time_stamp,
+ tx_ring->buffer_info[eop].time_stamp,
eop,
jiffies,
eop_desc->upper.fields.status);
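The hunks above switch the hang check from the ring index i to the EOP (end-of-packet) index, so the timeout is measured against the descriptor that actually completes a packet. A stand-alone sketch of the heuristic, with HZ and the timeout factor as placeholder values rather than the driver's:

#include <stdio.h>
#include <stdbool.h>

#define HZ              1000L
#define TIMEOUT_FACTOR  2L

/* jiffies-style wrap-safe "a is after b" comparison */
static bool time_after_l(long a, long b)
{
	return (long)(b - a) < 0;
}

static bool tx_hang(long jiffies_now, long eop_time_stamp, bool tx_paused)
{
	/* hung when the EOP descriptor has a timestamp, that timestamp is
	 * older than the scaled timeout, and TX is not flow-control paused */
	return eop_time_stamp &&
	       time_after_l(jiffies_now, eop_time_stamp + TIMEOUT_FACTOR * HZ) &&
	       !tx_paused;
}

int main(void)
{
	printf("%d\n", tx_hang(10 * HZ, 7 * HZ, false)); /* 1: considered hung */
	return 0;
}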
@@ -3999,25 +3546,13 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
return;
}
/* TCP/UDP Checksum has not been calculated */
- if (hw->mac_type <= e1000_82547_rev_2) {
- if (!(status & E1000_RXD_STAT_TCPCS))
- return;
- } else {
- if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
- return;
- }
+ if (!(status & E1000_RXD_STAT_TCPCS))
+ return;
+
/* It must be a TCP or UDP packet with a valid checksum */
if (likely(status & E1000_RXD_STAT_TCPCS)) {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (hw->mac_type > e1000_82547_rev_2) {
- /* IP fragment with UDP payload */
- /* Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)htons(csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
}
adapter->hw_csum_good++;
}
@@ -4814,20 +4349,6 @@ void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
pcix_set_mmrbc(adapter->pdev, mmrbc);
}
-s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
-{
- struct e1000_adapter *adapter = hw->back;
- u16 cap_offset;
-
- cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (!cap_offset)
- return -E1000_ERR_CONFIG;
-
- pci_read_config_word(adapter->pdev, cap_offset + reg, value);
-
- return E1000_SUCCESS;
-}
-
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
outl(value, port);
@@ -4850,33 +4371,27 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
ctrl |= E1000_CTRL_VME;
ew32(CTRL, ctrl);
- if (adapter->hw.mac_type != e1000_ich8lan) {
- /* enable VLAN receive filtering */
- rctl = er32(RCTL);
- rctl &= ~E1000_RCTL_CFIEN;
- if (!(netdev->flags & IFF_PROMISC))
- rctl |= E1000_RCTL_VFE;
- ew32(RCTL, rctl);
- e1000_update_mng_vlan(adapter);
- }
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_CFIEN;
+ if (!(netdev->flags & IFF_PROMISC))
+ rctl |= E1000_RCTL_VFE;
+ ew32(RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
} else {
/* disable VLAN tag insert/strip */
ctrl = er32(CTRL);
ctrl &= ~E1000_CTRL_VME;
ew32(CTRL, ctrl);
- if (adapter->hw.mac_type != e1000_ich8lan) {
- /* disable VLAN receive filtering */
- rctl = er32(RCTL);
- rctl &= ~E1000_RCTL_VFE;
- ew32(RCTL, rctl);
+ /* disable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~E1000_RCTL_VFE;
+ ew32(RCTL, rctl);
- if (adapter->mng_vlan_id !=
- (u16)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev,
- adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
+ if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
}
@@ -4913,14 +4428,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
- if ((hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- (vid == adapter->mng_vlan_id)) {
- /* release control to f/w */
- e1000_release_hw_control(adapter);
- return;
- }
-
/* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
@@ -5031,16 +4538,13 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
if (hw->media_type == e1000_media_type_fiber ||
- hw->media_type == e1000_media_type_internal_serdes) {
+ hw->media_type == e1000_media_type_internal_serdes) {
/* keep the laser running in D3 */
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
ew32(CTRL_EXT, ctrl_ext);
}
- /* Allow time for pending master requests to run */
- e1000_disable_pciex_master(hw);
-
ew32(WUC, E1000_WUC_PME_EN);
ew32(WUFC, wufc);
} else {
@@ -5056,16 +4560,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (adapter->en_mng_pt)
*enable_wake = true;
- if (hw->phy_type == e1000_phy_igp_3)
- e1000_phy_powerdown_workaround(hw);
-
if (netif_running(netdev))
e1000_free_irq(adapter);
- /* Release control of h/w to f/w. If f/w is AMT enabled, this
- * would have already happened in close and is redundant. */
- e1000_release_hw_control(adapter);
-
pci_disable_device(pdev);
return 0;
@@ -5131,14 +4628,6 @@ static int e1000_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- /* If the controller is 82573 and f/w is AMT, do not set
- * DRV_LOAD until the interface is up. For all other cases,
- * let the f/w know that the h/w is now under the control
- * of the driver. */
- if (hw->mac_type != e1000_82573 ||
- !e1000_check_mng_mode(hw))
- e1000_get_hw_control(adapter);
-
return 0;
}
#endif
@@ -5174,7 +4663,7 @@ static void e1000_netpoll(struct net_device *netdev)
/**
* e1000_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
@@ -5243,7 +4732,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
e1000_init_manageability(adapter);
@@ -5255,15 +4743,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
}
netif_device_attach(netdev);
-
- /* If the controller is 82573 and f/w is AMT, do not set
- * DRV_LOAD until the interface is up. For all other cases,
- * let the f/w know that the h/w is now under the control
- * of the driver. */
- if (hw->mac_type != e1000_82573 ||
- !e1000_check_mng_mode(hw))
- e1000_get_hw_control(adapter);
-
}
/* e1000_main.c */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 213437d1315..38d2741ccae 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -518,22 +518,6 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
adapter->smart_power_down = opt.def;
}
}
- { /* Kumeran Lock Loss Workaround */
- opt = (struct e1000_option) {
- .type = enable_option,
- .name = "Kumeran Lock Loss Workaround",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-
- if (num_KumeranLockLoss > bd) {
- unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
- e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
- adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
- } else {
- adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
- }
- }
switch (adapter->hw.media_type) {
case e1000_media_type_fiber:
@@ -626,12 +610,6 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
.p = dplx_list }}
};
- if (e1000_check_phy_reset_block(&adapter->hw)) {
- DPRINTK(PROBE, INFO,
- "Link active due to SoL/IDER Session. "
- "Speed/Duplex/AutoNeg parameter ignored.\n");
- return;
- }
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b53b40ba88a..d1e0563a67d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.pba = 20,
- .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
+ .max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
@@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.pba = 20,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 16c193a6c95..0687c6aa4e4 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4982,12 +4982,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_pci_reg;
/* AER (Advanced Error Reporting) hooks */
- err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
- "0x%x\n", err);
- /* non-fatal, continue */
- }
+ pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
/* PCI config space info */
@@ -5263,7 +5258,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- int err;
/*
* flush_scheduled work may reschedule our watchdog task, so
@@ -5299,10 +5293,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
/* AER disable */
- err = pci_disable_pcie_error_reporting(pdev);
- if (err)
- dev_err(&pdev->dev,
- "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 977c3d35827..41bd7aeafd8 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3083,7 +3083,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_poll_controller = ehea_netpoll,
#endif
.ndo_get_stats = ehea_get_stats,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = ehea_set_multicast_list,
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index d4d9a3eda69..f5b96cadeb2 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -111,6 +111,7 @@
* Sorry, I had to rewrite most of this for 2.5.x -DaveM
*/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b7311bc0025..9c950bb5e90 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -17,8 +17,13 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <net/ethoc.h>
+static int buffer_size = 0x8000; /* 32 KBytes */
+module_param(buffer_size, int, 0);
+MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
+
/* register offsets */
#define MODER 0x00
#define INT_SOURCE 0x04
@@ -167,6 +172,7 @@
* struct ethoc - driver-private device structure
* @iobase: pointer to I/O memory region
* @membase: pointer to buffer memory region
+ * @dma_alloc: dma allocated buffer size
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -185,6 +191,7 @@
struct ethoc {
void __iomem *iobase;
void __iomem *membase;
+ int dma_alloc;
unsigned int num_tx;
unsigned int cur_tx;
@@ -284,7 +291,7 @@ static int ethoc_init_ring(struct ethoc *dev)
dev->cur_rx = 0;
/* setup transmission buffers */
- bd.addr = 0;
+ bd.addr = virt_to_phys(dev->membase);
bd.stat = TX_BD_IRQ | TX_BD_CRC;
for (i = 0; i < dev->num_tx; i++) {
@@ -295,7 +302,6 @@ static int ethoc_init_ring(struct ethoc *dev)
bd.addr += ETHOC_BUFSIZ;
}
- bd.addr = dev->num_tx * ETHOC_BUFSIZ;
bd.stat = RX_BD_EMPTY | RX_BD_IRQ;
for (i = 0; i < dev->num_rx; i++) {
@@ -400,8 +406,12 @@ static int ethoc_rx(struct net_device *dev, int limit)
if (ethoc_update_rx_stats(priv, &bd) == 0) {
int size = bd.stat >> 16;
struct sk_buff *skb = netdev_alloc_skb(dev, size);
+
+ size -= 4; /* strip the CRC */
+
if (likely(skb)) {
+ skb_reserve(skb, 2); /* align TCP/IP header */
- void *src = priv->membase + bd.addr;
+ void *src = phys_to_virt(bd.addr);
memcpy_fromio(skb_put(skb, size), src, size);
skb->protocol = eth_type_trans(skb, dev);
priv->stats.rx_packets++;
@@ -653,9 +663,10 @@ static int ethoc_open(struct net_device *dev)
if (ret)
return ret;
- /* calculate the number of TX/RX buffers */
- num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ;
- priv->num_tx = min(min_tx, num_bd / 4);
+ /* calculate the number of TX/RX buffers, maximum 128 supported */
+ num_bd = min_t(unsigned int,
+ 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
+ priv->num_tx = max(min_tx, num_bd / 4);
priv->num_rx = num_bd - priv->num_tx;
ethoc_write(priv, TX_BD_NUM, priv->num_tx);
@@ -823,7 +834,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
else
bd.stat &= ~TX_BD_PAD;
- dest = priv->membase + bd.addr;
+ dest = phys_to_virt(bd.addr);
memcpy_toio(dest, skb->data, skb->len);
bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
@@ -903,22 +914,19 @@ static int ethoc_probe(struct platform_device *pdev)
/* obtain buffer memory space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain memory space\n");
- ret = -ENXIO;
- goto free;
- }
-
- mem = devm_request_mem_region(&pdev->dev, res->start,
+ if (res) {
+ mem = devm_request_mem_region(&pdev->dev, res->start,
res->end - res->start + 1, res->name);
- if (!mem) {
- dev_err(&pdev->dev, "cannot request memory space\n");
- ret = -ENXIO;
- goto free;
+ if (!mem) {
+ dev_err(&pdev->dev, "cannot request memory space\n");
+ ret = -ENXIO;
+ goto free;
+ }
+
+ netdev->mem_start = mem->start;
+ netdev->mem_end = mem->end;
}
- netdev->mem_start = mem->start;
- netdev->mem_end = mem->end;
/* obtain device IRQ number */
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -933,6 +941,7 @@ static int ethoc_probe(struct platform_device *pdev)
/* setup driver-private data */
priv = netdev_priv(netdev);
priv->netdev = netdev;
+ priv->dma_alloc = 0;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
mmio->end - mmio->start + 1);
@@ -942,12 +951,27 @@ static int ethoc_probe(struct platform_device *pdev)
goto error;
}
- priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start,
- mem->end - mem->start + 1);
- if (!priv->membase) {
- dev_err(&pdev->dev, "cannot remap memory space\n");
- ret = -ENXIO;
- goto error;
+ if (netdev->mem_end) {
+ priv->membase = devm_ioremap_nocache(&pdev->dev,
+ netdev->mem_start, mem->end - mem->start + 1);
+ if (!priv->membase) {
+ dev_err(&pdev->dev, "cannot remap memory space\n");
+ ret = -ENXIO;
+ goto error;
+ }
+ } else {
+ /* Allocate buffer memory */
+ priv->membase = dma_alloc_coherent(NULL,
+ buffer_size, (void *)&netdev->mem_start,
+ GFP_KERNEL);
+ if (!priv->membase) {
+ dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
+ buffer_size);
+ ret = -ENOMEM;
+ goto error;
+ }
+ netdev->mem_end = netdev->mem_start + buffer_size;
+ priv->dma_alloc = buffer_size;
}
/* Allow the platform setup code to pass in a MAC address. */
@@ -1034,6 +1058,9 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
+ if (priv->dma_alloc)
+ dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+ netdev->mem_start);
free_netdev(netdev);
out:
return ret;
@@ -1059,7 +1086,9 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
-
+ if (priv->dma_alloc)
+ dma_free_coherent(NULL, priv->dma_alloc, priv->membase,
+ netdev->mem_start);
unregister_netdev(netdev);
free_netdev(netdev);
}
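Two changes in this file are worth restating: when the platform supplies no second memory resource, probe now falls back to a dma_alloc_coherent() buffer of buffer_size bytes (released again in the error path and in remove), and ethoc_open() caps the descriptor count at 128, giving TX a quarter of the descriptors (but at least a minimum) and the remainder to RX. A user-space sketch of that split; the per-buffer size and the TX minimum are assumptions, not values taken from the driver:

#include <stdio.h>

#define BUFSIZ_PER_BD  2048u  /* assumed per-descriptor buffer size */
#define MIN_TX         2u     /* assumed lower bound on TX buffers */
#define MAX_BD         128u   /* hardware limit noted in the hunk above */

static void split_buffers(unsigned long mem_len,
			  unsigned int *num_tx, unsigned int *num_rx)
{
	unsigned int num_bd = mem_len / BUFSIZ_PER_BD;

	if (num_bd > MAX_BD)
		num_bd = MAX_BD;
	*num_tx = (num_bd / 4 > MIN_TX) ? num_bd / 4 : MIN_TX;
	*num_rx = num_bd - *num_tx;
}

int main(void)
{
	unsigned int tx, rx;

	split_buffers(0x8000, &tx, &rx); /* the default 32 KiB DMA buffer */
	printf("tx=%u rx=%u\n", tx, rx); /* tx=4 rx=12 with these assumptions */
	return 0;
}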
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index b2a5ec8f372..dd4ba01fd92 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -145,6 +145,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index c40113f5896..66dace6d324 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
mpc52xx_fec_hw_init(dev);
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
- phy_start(priv->phydev);
- }
-
bcom_fec_rx_reset(priv->rx_dmatsk);
bcom_fec_tx_reset(priv->tx_dmatsk);
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 31e6d62b785..ee0f3c6d3f8 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "mpc5200b-fec-phy", },
{}
};
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);
struct of_platform_driver mpc52xx_fec_mdio_driver = {
.name = "mpc5200b-fec-phy",
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0a1c2bb27d4..e1da4666f20 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -49,6 +49,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 2bc2d2b2064..ec2f5034457 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
#endif
{}
};
+MODULE_DEVICE_TABLE(of, fs_enet_match);
static struct of_platform_driver fs_enet_driver = {
.name = "fs_enet",
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 93b481b0e3c..24ff9f43a62 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);
static struct of_platform_driver fs_enet_bb_mdio_driver = {
.name = "fsl-bb-mdio",
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a2d69c1cd07..96eba4280c5 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
#endif
{},
};
+MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);
static struct of_platform_driver fs_enet_fec_mdio_driver = {
.name = "fsl-fec-mdio",
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d167090248e..6ac46486697 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
static struct of_platform_driver fsl_pq_mdio_driver = {
.name = "fsl-pq_mdio",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1e5289ffef6..5bf31f1509c 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2325,9 +2325,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:fsl-gianfar");
-
static struct of_device_id gfar_match[] =
{
{
@@ -2336,6 +2333,7 @@ static struct of_device_id gfar_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, gfar_match);
/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
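Several drivers in this patch (fec_mpc52xx_phy, the fs_enet MDIO drivers, fsl_pq_mdio, gianfar, ibm_newemac) gain a MODULE_DEVICE_TABLE(of, ...) line. The macro exports the OF match table into the module's device table so userspace can autoload the module when a matching device-tree node is present, which is why gianfar can drop its hand-written platform alias. A hypothetical skeleton, with a made-up compatible string and driver name, only to show where the macro sits relative to the match table:

#include <linux/module.h>
#include <linux/of_platform.h>

static struct of_device_id example_match[] = {
	{ .compatible = "acme,example-eth", }, /* hypothetical */
	{},
};
MODULE_DEVICE_TABLE(of, example_match);

static struct of_platform_driver example_driver = {
	.name		= "example-eth",       /* hypothetical */
	.match_table	= example_match,
};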
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 1d5064a09ac..f7519a59494 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -145,6 +145,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 7bcaf7c6624..e344c84c0ef 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -44,6 +44,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index aa4488e871b..ed60fd66427 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -71,6 +71,7 @@
/*****************************************************************************/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/string.h>
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
index 88c59359602..1686f6dcbbc 100644
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -61,6 +61,7 @@
/*****************************************************************************/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/string.h>
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 0013c409782..91c5790c958 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -42,6 +42,7 @@
/*****************************************************************************/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/net.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 33b55f72974..db4b7f1603f 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -258,7 +258,7 @@ static void ax_bump(struct mkiss *ax)
}
if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
printk(KERN_INFO
- "mkiss: %s: Switchting to crc-smack\n",
+ "mkiss: %s: Switching to crc-smack\n",
ax->dev->name);
ax->crcmode = CRC_MODE_SMACK;
}
@@ -272,7 +272,7 @@ static void ax_bump(struct mkiss *ax)
}
if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
printk(KERN_INFO
- "mkiss: %s: Switchting to crc-flexnet\n",
+ "mkiss: %s: Switching to crc-flexnet\n",
ax->dev->name);
ax->crcmode = CRC_MODE_FLEX;
}
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index a9a1a99f02d..dd866513806 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -98,6 +98,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 89c82c5e63e..3fae8755979 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -24,6 +24,7 @@
*
*/
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -443,7 +444,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
ret |= EMAC_MR1_TFS_2K;
break;
default:
- printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
+ printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
dev->ndev->name, tx_size);
}
@@ -470,6 +471,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
DBG2(dev, "__emac4_calc_base_mr1" NL);
switch(tx_size) {
+ case 16384:
+ ret |= EMAC4_MR1_TFS_16K;
+ break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
@@ -477,7 +481,7 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
ret |= EMAC4_MR1_TFS_2K;
break;
default:
- printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
+ printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
dev->ndev->name, tx_size);
}
@@ -2985,6 +2989,7 @@ static struct of_device_id emac_match[] =
},
{},
};
+MODULE_DEVICE_TABLE(of, emac_match);
static struct of_platform_driver emac_driver = {
.name = "emac",
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 0afc2cf5c52..d34adf99fc6 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -153,6 +153,7 @@ struct emac_regs {
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
#define EMAC4_MR1_JPSM 0x00000800
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index a0231cd079f..7d76bb085e1 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -286,41 +286,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
}
/**
- * igb_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- *
- * Updates entire Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- **/
-void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
-{
- u32 hash_value, hash_bit, hash_reg;
- int i;
-
- /* clear mta_shadow */
- memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
-
- /* update mta_shadow from mc_addr_list */
- for (i = 0; (u32) i < mc_addr_count; i++) {
- hash_value = igb_hash_mc_addr(hw, mc_addr_list);
-
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
- mc_addr_list += (ETH_ALEN);
- }
-
- /* replace the entire MTA table */
- for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
- wrfl();
-}
-
-/**
* igb_hash_mc_addr - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
@@ -329,7 +294,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
* the multicast filter table array address and new table value. See
* igb_mta_set()
**/
-u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
@@ -392,6 +357,41 @@ u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
}
/**
+ * igb_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates entire Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
+{
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
+
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
+ hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+ wrfl();
+}
+
+/**
* igb_clear_hw_cntrs_base - Clear base hardware counters
* @hw: pointer to the HW structure
*
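The reordering above exists so igb_hash_mc_addr() can become static ahead of its only remaining caller; the MTA update itself is unchanged. For reference, a user-space sketch of how the hash selects one bit in the 128-entry, 32-bit-wide multicast table shadow (the register count is an assumption here, and the hash value itself is computed elsewhere from the MAC address):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MTA_REG_COUNT 128u /* assumed, must be a power of two */

static void mta_set(uint32_t *mta_shadow, uint32_t hash_value)
{
	/* upper hash bits pick the register, lower five bits pick the bit */
	uint32_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1);
	uint32_t hash_bit = hash_value & 0x1F;

	mta_shadow[hash_reg] |= 1u << hash_bit;
}

int main(void)
{
	uint32_t mta[MTA_REG_COUNT];
	uint32_t reg = (0x0ABCu >> 5) & (MTA_REG_COUNT - 1);

	memset(mta, 0, sizeof(mta));
	mta_set(mta, 0x0ABC);
	printf("reg %u = 0x%08x\n", reg, mta[reg]); /* reg 85 = 0x10000000 */
	return 0;
}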
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index 7518af8cbbf..bca17d88241 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -88,6 +88,5 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
-extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
#endif
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index d004c359244..deaea8fa103 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/sched.h>
#include "igb.h"
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 5d6c1530a8c..714c3a4a44e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1246,12 +1246,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
- err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
- "0x%x\n", err);
- /* non-fatal, continue */
- }
+ pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -1628,7 +1623,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- int err;
/* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled */
@@ -1682,10 +1676,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
free_netdev(netdev);
- err = pci_disable_pcie_error_reporting(pdev);
- if (err)
- dev_err(&pdev->dev,
- "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 2fc30b449ee..cb90d640007 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -66,7 +66,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index f4d13fc51cb..b54d3b48045 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5f9d7335397..8d713ebac15 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b3d30bcb88e..c0e0bb9401d 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -50,7 +50,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
#include <linux/crc32.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1445e586519..84db145d2b5 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -163,6 +164,22 @@ inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
}
/*
+ * Set the IrDA communications mode.
+ */
+static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
+{
+ if (si->pdata->transceiver_mode)
+ si->pdata->transceiver_mode(si->dev, mode);
+ else {
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_set_value(si->pdata->gpio_pwdown,
+ !(mode & IR_OFF) ^
+ !si->pdata->gpio_pwdown_inverted);
+ pxa2xx_transceiver_mode(si->dev, mode);
+ }
+}
+
+/*
* Set the IrDA communications speed.
*/
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
@@ -188,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
pxa_irda_disable_clk(si);
/* set board transceiver to SIR mode */
- si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
+ pxa_irda_set_mode(si, IR_SIRMODE);
/* enable the STUART clock */
pxa_irda_enable_sirclk(si);
@@ -222,7 +239,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
ICCR0 = 0;
/* set board transceiver to FIR mode */
- si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
+ pxa_irda_set_mode(si, IR_FIRMODE);
/* enable the FICP clock */
pxa_irda_enable_firclk(si);
@@ -641,7 +658,7 @@ static void pxa_irda_shutdown(struct pxa_irda *si)
local_irq_restore(flags);
/* power off board transceiver */
- si->pdata->transceiver_mode(si->dev, IR_OFF);
+ pxa_irda_set_mode(si, IR_OFF);
printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
@@ -849,10 +866,26 @@ static int pxa_irda_probe(struct platform_device *pdev)
if (err)
goto err_mem_5;
- if (si->pdata->startup)
+ if (gpio_is_valid(si->pdata->gpio_pwdown)) {
+ err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
+ if (err)
+ goto err_startup;
+ err = gpio_direction_output(si->pdata->gpio_pwdown,
+ !si->pdata->gpio_pwdown_inverted);
+ if (err) {
+ gpio_free(si->pdata->gpio_pwdown);
+ goto err_startup;
+ }
+ }
+
+ if (si->pdata->startup) {
err = si->pdata->startup(si->dev);
- if (err)
- goto err_startup;
+ if (err)
+ goto err_startup;
+ }
+
+ if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
+ dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
dev->netdev_ops = &pxa_irda_netdev_ops;
@@ -903,6 +936,8 @@ static int pxa_irda_remove(struct platform_device *_dev)
if (dev) {
struct pxa_irda *si = netdev_priv(dev);
unregister_netdev(dev);
+ if (gpio_is_valid(si->pdata->gpio_pwdown))
+ gpio_free(si->pdata->gpio_pwdown);
if (si->pdata->shutdown)
si->pdata->shutdown(si->dev);
kfree(si->tx_buff.head);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 38bf7cf2256..c412e802617 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si)
/*
* Ensure that the ports for this device are setup correctly.
*/
- if (si->pdata->startup)
- si->pdata->startup(si->dev);
+ if (si->pdata->startup) {
+ ret = si->pdata->startup(si->dev);
+ if (ret)
+ return ret;
+ }
/*
* Configure PPC for IRDA - we want to drive TXD2 low.
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c
index fcf287b749d..99e1ec02a01 100644
--- a/drivers/net/irda/toim3232-sir.c
+++ b/drivers/net/irda/toim3232-sir.c
@@ -120,6 +120,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/sched.h>
#include <net/irda/irda.h>
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index e36e951cbc6..aa7286bc436 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -495,7 +495,7 @@ static void veth_take_cap_ack(struct veth_lpar_connection *cnx,
cnx->remote_lp);
} else {
memcpy(&cnx->cap_ack_event, event,
- sizeof(&cnx->cap_ack_event));
+ sizeof(cnx->cap_ack_event));
cnx->state |= VETH_STATE_GOTCAPACK;
veth_kick_statemachine(cnx);
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index dd688d45e9c..385be601666 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -267,7 +267,8 @@ struct ixgbe_adapter {
enum ixgbe_fc_mode last_lfc_mode;
/* Interrupt Throttle Rate */
- u32 itr_setting;
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
u16 eitr_low;
u16 eitr_high;
@@ -351,7 +352,8 @@ struct ixgbe_adapter {
struct ixgbe_hw_stats stats;
/* Interrupt Throttle Rate */
- u32 eitr_param;
+ u32 rx_eitr_param;
+ u32 tx_eitr_param;
unsigned long state;
u64 tx_busy;
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 56b12f3192f..e2d5343f127 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -425,7 +425,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
+ ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 2ec58dcdb82..34b04924c8a 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -330,6 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_82599_KX4:
+ case IXGBE_DEV_ID_82599_KX4_MEZZ:
+ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_XAUI_LOM:
/* Default device ID is mezzanine card KX/KX4 */
media_type = ixgbe_media_type_backplane;
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 6621e172df3..40ff120a9ad 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1355,9 +1355,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
/**
* ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
* @hw: pointer to hardware structure
- * @addr_list: the list of new addresses
- * @addr_count: number of addresses
- * @next: iterator function to walk the address list
+ * @uc_list: the list of new addresses
*
* The given list replaces any existing list. Clears the secondary addrs from
* receive address registers. Uses unused receive address registers for the
@@ -1663,7 +1661,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
+ ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
@@ -1734,75 +1732,140 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
s32 ret_val = 0;
ixgbe_link_speed speed;
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+ u32 links2, anlp1_reg, autoc_reg, links;
bool link_up;
/*
* AN should have completed when the cable was plugged in.
* Look for reasons to bail out. Bail out if:
* - FC autoneg is disabled, or if
- * - we don't have multispeed fiber, or if
- * - we're not running at 1G, or if
- * - link is not up, or if
- * - link is up but AN did not complete, or if
- * - link is up and AN completed but timed out
+ * - link is not up.
*
- * Since we're being called from an LSC, link is already know to be up.
+ * Since we're being called from an LSC, link is already known to be up.
* So use link_up_wait_to_complete=false.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, false);
- linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
-
- if (hw->fc.disable_fc_autoneg ||
- !hw->phy.multispeed_fiber ||
- (speed != IXGBE_LINK_SPEED_1GB_FULL) ||
- !link_up ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
- ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+
+ if (hw->fc.disable_fc_autoneg || (!link_up)) {
hw->fc.fc_was_autonegged = false;
hw->fc.current_mode = hw->fc.requested_mode;
- hw_dbg(hw, "Autoneg FC was skipped.\n");
goto out;
}
/*
+ * On backplane, bail out if
+ * - backplane autoneg was not completed, or if
+ * - link partner is not AN enabled
+ */
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
+ links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+ if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
+ ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+ }
+
+ /*
+ * On multispeed fiber at 1g, bail out if
+ * - link is up but AN did not complete, or if
+ * - link is up and AN completed but timed out
+ */
+ if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) {
+ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+ if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+ ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ goto out;
+ }
+ }
+
+ /*
* Read the AN advertisement and LP ability registers and resolve
* local flow control settings accordingly
*/
- pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
- if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+ (hw->phy.media_type != ixgbe_media_type_backplane)) {
+ pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+ if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+ }
+ } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
+ !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
+ (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
+ }
+ }
+
+ if (hw->phy.media_type == ixgbe_media_type_backplane) {
/*
- * Now we need to check if the user selected Rx ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ * Read the 10g AN autoc and LP ability registers and resolve
+ * local flow control settings accordingly
*/
- if (hw->fc.requested_mode == ixgbe_fc_full) {
- hw->fc.current_mode = ixgbe_fc_full;
- hw_dbg(hw, "Flow Control = FULL.\n");
- } else {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+ if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) {
+ /*
+ * Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise RX
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_full) {
+ hw->fc.current_mode = ixgbe_fc_full;
+ hw_dbg(hw, "Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_rx_pause;
+ hw_dbg(hw, "Flow Control=RX PAUSE only\n");
+ }
+ } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+ (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
+ hw->fc.current_mode = ixgbe_fc_tx_pause;
+ hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
+ } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) &&
+ (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) &&
+ !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) &&
+ (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) {
hw->fc.current_mode = ixgbe_fc_rx_pause;
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
+ } else {
+ hw->fc.current_mode = ixgbe_fc_none;
+ hw_dbg(hw, "Flow Control = NONE.\n");
}
- } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_tx_pause;
- hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
- } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) &&
- !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) &&
- (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) {
- hw->fc.current_mode = ixgbe_fc_rx_pause;
- hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
- } else {
- hw->fc.current_mode = ixgbe_fc_none;
- hw_dbg(hw, "Flow Control = NONE.\n");
}
-
/* Record that current_mode is the result of a successful autoneg */
hw->fc.fc_was_autonegged = true;
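The rewritten autoneg path applies the same symmetric/asymmetric PAUSE truth table twice: once to the 1G PCS advertisement/link-partner registers and once to the 10G backplane AUTOC/ANLP1 pair. Restated as a stand-alone function, with the requested-mode handling reduced to a single want_full flag:

#include <stdio.h>
#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_mode resolve_fc(bool loc_sym, bool loc_asm,
			       bool lp_sym, bool lp_asm, bool want_full)
{
	if (loc_sym && lp_sym)
		/* both sides symmetric-capable: full, or RX-only if that is
		 * all the user actually asked for */
		return want_full ? FC_FULL : FC_RX_PAUSE;
	if (!loc_sym && loc_asm && lp_sym && lp_asm)
		return FC_TX_PAUSE;
	if (loc_sym && loc_asm && !lp_sym && lp_asm)
		return FC_RX_PAUSE;
	return FC_NONE;
}

int main(void)
{
	/* local advertises SYM+ASM, partner advertises ASM only */
	printf("%d\n", resolve_fc(true, true, false, true, true)); /* 1: RX PAUSE only */
	return 0;
}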
@@ -1919,7 +1982,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
#endif /* CONFIG_DCB */
default:
hw_dbg(hw, "Flow control param set incorrectly\n");
- ret_val = -IXGBE_ERR_CONFIG;
+ ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
@@ -1927,9 +1990,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
- /* Enable and restart autoneg to inform the link partner */
- reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART;
-
/* Disable AN timeout */
if (hw->fc.strict_ieee)
reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
@@ -1937,6 +1997,70 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+ /*
+ * Set up the 10G flow control advertisement registers so the HW
+ * can do fc autoneg once the cable is plugged in. If we end up
+ * using 1g instead, this is harmless.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+ /*
+ * The possible values of fc.requested_mode are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but
+ * we do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ * other: Invalid.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ /* Flow control completely disabled by software override. */
+ reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+ break;
+ case ixgbe_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+ break;
+ case ixgbe_fc_tx_pause:
+ /*
+ * Tx Flow control is enabled, and Rx Flow control is
+ * disabled by software override.
+ */
+ reg |= (IXGBE_AUTOC_ASM_PAUSE);
+ reg &= ~(IXGBE_AUTOC_SYM_PAUSE);
+ break;
+ case ixgbe_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by SW override. */
+ reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE);
+ break;
+#ifdef CONFIG_DCB
+ case ixgbe_fc_pfc:
+ goto out;
+ break;
+#endif /* CONFIG_DCB */
+ default:
+ hw_dbg(hw, "Flow control param set incorrectly\n");
+ ret_val = IXGBE_ERR_CONFIG;
+ goto out;
+ break;
+ }
+ /*
+ * AUTOC restart handles negotiation of 1G and 10G. There is
+ * no need to set the PCS1GCTL register.
+ */
+ reg |= IXGBE_AUTOC_AN_RESTART;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
+ hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+
out:
return ret_val;
}
@@ -2000,7 +2124,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
while (timeout) {
if (ixgbe_get_eeprom_semaphore(hw))
- return -IXGBE_ERR_SWFW_SYNC;
+ return IXGBE_ERR_SWFW_SYNC;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
if (!(gssr & (fwmask | swmask)))
@@ -2017,7 +2141,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
if (!timeout) {
hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n");
- return -IXGBE_ERR_SWFW_SYNC;
+ return IXGBE_ERR_SWFW_SYNC;
}
gssr |= swmask;
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 026e94a9984..fa314cb005a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -53,6 +53,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
+ {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
+ {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
+ {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
+ {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
{"lsc_int", IXGBE_STAT(lsc_int)},
{"tx_busy", IXGBE_STAT(tx_busy)},
{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
@@ -1929,7 +1933,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
/* only valid if in constant ITR mode */
- switch (adapter->itr_setting) {
+ switch (adapter->rx_itr_setting) {
case 0:
/* throttling disabled */
ec->rx_coalesce_usecs = 0;
@@ -1940,9 +1944,25 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
break;
default:
/* fixed interrupt rate mode */
- ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
+ ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
break;
}
+
+ /* only valid if in constant ITR mode */
+ switch (adapter->tx_itr_setting) {
+ case 0:
+ /* throttling disabled */
+ ec->tx_coalesce_usecs = 0;
+ break;
+ case 1:
+ /* dynamic ITR mode */
+ ec->tx_coalesce_usecs = 1;
+ break;
+ default:
+ ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
+ break;
+ }
+
return 0;
}
@@ -1953,6 +1973,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_q_vector *q_vector;
int i;
+ /*
+ * don't accept tx specific changes if we've got mixed RxTx vectors
+ * test and jump out here if needed before changing the rx numbers
+ */
+ if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
+ adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ return -EINVAL;
+
if (ec->tx_max_coalesced_frames_irq)
adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
@@ -1963,26 +1991,49 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
/* store the value in ints/second */
- adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
+ adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
/* static value of interrupt rate */
- adapter->itr_setting = adapter->eitr_param;
+ adapter->rx_itr_setting = adapter->rx_eitr_param;
/* clear the lower bit as its used for dynamic state */
- adapter->itr_setting &= ~1;
+ adapter->rx_itr_setting &= ~1;
} else if (ec->rx_coalesce_usecs == 1) {
/* 1 means dynamic mode */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->rx_itr_setting = 1;
} else {
/*
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
- adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
+ adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
else
- adapter->eitr_param = IXGBE_MAX_INT_RATE;
- adapter->itr_setting = 0;
+ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->rx_itr_setting = 0;
+ }
+
+ if (ec->tx_coalesce_usecs > 1) {
+ /* check the limits */
+ if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
+ (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
+ return -EINVAL;
+
+ /* store the value in ints/second */
+ adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;
+
+ /* static value of interrupt rate */
+ adapter->tx_itr_setting = adapter->tx_eitr_param;
+
+ /* clear the lower bit as it's used for dynamic state */
+ adapter->tx_itr_setting &= ~1;
+ } else if (ec->tx_coalesce_usecs == 1) {
+ /* 1 means dynamic mode */
+ adapter->tx_eitr_param = 10000;
+ adapter->tx_itr_setting = 1;
+ } else {
+ adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
+ adapter->tx_itr_setting = 0;
}
/* MSI/MSIx Interrupt Mode */
@@ -1992,17 +2043,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->txr_count && !q_vector->rxr_count)
- /* tx vector gets half the rate */
- q_vector->eitr = (adapter->eitr_param >> 1);
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
else
/* rx only or mixed */
- q_vector->eitr = adapter->eitr_param;
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
/* Legacy Interrupt Mode */
} else {
q_vector = adapter->q_vector[0];
- q_vector->eitr = adapter->eitr_param;
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
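The coalesce handling above now keeps separate RX and TX settings, but the encoding is unchanged: an ethtool value above 1 is microseconds per interrupt and is stored as an interrupts-per-second EITR parameter, 1 selects dynamic ITR, and 0 disables throttling. A sketch of that conversion; the rate bounds here are placeholders, not the driver's IXGBE_MIN/MAX_INT_RATE values:

#include <stdio.h>

#define MAX_INT_RATE 488281u /* assumed upper bound, ints/sec */
#define MIN_INT_RATE 956u    /* assumed lower bound, ints/sec */

static int usecs_to_eitr(unsigned int usecs, unsigned int *eitr)
{
	if (usecs > 1) {
		unsigned int rate = 1000000u / usecs;

		if (rate > MAX_INT_RATE || rate < MIN_INT_RATE)
			return -1;            /* -EINVAL in the driver */
		*eitr = rate;
	} else if (usecs == 1) {
		*eitr = 10000u;               /* dynamic-mode starting rate */
	} else {
		*eitr = MAX_INT_RATE;         /* throttling effectively off */
	}
	return 0;
}

int main(void)
{
	unsigned int eitr;

	if (!usecs_to_eitr(125, &eitr))
		printf("125us -> %u ints/s\n", eitr); /* 8000 ints/s */
	return 0;
}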
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 59ad9590e70..cbb143ca1eb 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -49,7 +49,7 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.37-k2"
+#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";
@@ -97,8 +97,12 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
+ board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
+ board_82599 },
/* required last entry */
{0, }
@@ -926,12 +930,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
r_idx + 1);
}
- /* if this is a tx only vector halve the interrupt rate */
if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
+ /* tx only */
+ q_vector->eitr = adapter->tx_eitr_param;
else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* rx or mixed */
+ q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
}
@@ -1359,7 +1363,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
@@ -1420,7 +1424,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
@@ -1458,10 +1462,10 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
- /* If all Rx work done, exit the polling mode */
+ /* If all Tx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->tx_itr_setting & 1)
ixgbe_set_itr_msix(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
@@ -1848,7 +1852,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
- EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
+ EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
ixgbe_set_ivar(adapter, 0, 0, 0);
ixgbe_set_ivar(adapter, 1, 0, 0);
@@ -1885,12 +1889,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
adapter->tx_ring[i].head = IXGBE_TDH(j);
adapter->tx_ring[i].tail = IXGBE_TDT(j);
- /* Disable Tx Head Writeback RO bit, since this hoses
+ /*
+ * Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
*/
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
+ break;
+ case ixgbe_mac_82599EB:
+ default:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+ break;
+ }
txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ default:
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+ break;
+ }
}
if (hw->mac.type == ixgbe_mac_82599EB) {
/* We enable 8 traffic classes, DCB only */
@@ -1970,6 +1991,50 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_configure_rscctl - enable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @index: index of ring to set
+ * @rx_buf_len: rx buffer length
+ **/
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
+ int rx_buf_len)
+{
+ struct ixgbe_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j;
+ u32 rscctrl;
+
+ rx_ring = &adapter->rx_ring[index];
+ j = rx_ring->reg_idx;
+ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
+ rscctrl |= IXGBE_RSCCTL_RSCEN;
+ /*
+ * we must limit the number of descriptors so that the
+ * total size of max desc * buf_len is not greater
+ * than 65535
+ */
+ if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+#if (MAX_SKB_FRAGS > 16)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+#elif (MAX_SKB_FRAGS > 8)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+#elif (MAX_SKB_FRAGS > 4)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+#else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+#endif
+ } else {
+ if (rx_buf_len < IXGBE_RXBUFFER_4096)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+ else if (rx_buf_len < IXGBE_RXBUFFER_8192)
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+ else
+ rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
+}
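A quick check of the descriptor caps chosen above, worked out from the bounds already in the code (no new assumptions): in each non-packet-split branch the descriptor count times the largest buffer length stays under the 65535-byte RSC limit.

	/* rx_buf_len <  4096: MAXDESC_16 -> worst case 16 * 4095 = 65520 <= 65535
	 * rx_buf_len <  8192: MAXDESC_8  -> worst case  8 * 8191 = 65528 <= 65535
	 * otherwise:          MAXDESC_4  -> 4 descriptors of the larger buffers
	 */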
+
+/**
* ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
* @adapter: board private structure
*
@@ -1990,7 +2055,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 fctrl, hlreg0;
u32 reta = 0, mrqc = 0;
u32 rdrxctl;
- u32 rscctrl;
int rx_buf_len;
/* Decide whether to use packet split mode or not */
@@ -2148,36 +2212,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
- j = rx_ring->reg_idx;
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
- rscctrl |= IXGBE_RSCCTL_RSCEN;
- /*
- * we must limit the number of descriptors so that the
- * total size of max desc * buf_len is not greater
- * than 65535
- */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
-#if (MAX_SKB_FRAGS > 16)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
-#endif
- } else {
- if (rx_buf_len < IXGBE_RXBUFFER_4096)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8192)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
- else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
- }
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbe_configure_rscctl(adapter, i, rx_buf_len);
+
/* Disable RSC for ACK packets */
IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
(IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -2926,6 +2963,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
+ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
+ del_timer_sync(&adapter->sfp_timer);
del_timer_sync(&adapter->watchdog_timer);
cancel_work_sync(&adapter->watchdog_task);
@@ -2989,7 +3028,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
- if (adapter->itr_setting & 1)
+ if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(adapter);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
@@ -3599,7 +3638,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- q_vector->eitr = adapter->eitr_param;
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = adapter->tx_eitr_param;
+ else
+ q_vector->eitr = adapter->rx_eitr_param;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
adapter->q_vector[q_idx] = q_vector;
@@ -3868,8 +3910,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.disable_fc_autoneg = false;
/* enable itr by default in dynamic mode */
- adapter->itr_setting = 1;
- adapter->eitr_param = 20000;
+ adapter->rx_itr_setting = 1;
+ adapter->rx_eitr_param = 20000;
+ adapter->tx_itr_setting = 1;
+ adapter->tx_eitr_param = 10000;
/* set defaults for eitr in MegaBytes */
adapter->eitr_low = 10;
@@ -4409,10 +4453,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* 82598 hardware only has a 32 bit counter in the high register */
if (hw->mac.type == ixgbe_mac_82599EB) {
+ u64 tmp;
adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
- IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
+ tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
+ adapter->stats.gorc += (tmp << 32);
adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
- IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
+ tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
+ adapter->stats.gotc += (tmp << 32);
adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
@@ -5048,7 +5095,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
/* Right now, we support IPv4 only */
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
- struct udphdr *uh;
struct iphdr *iph = ip_hdr(skb);
struct ethhdr *eth = (struct ethhdr *)skb->data;
u16 vlan_id, src_port, dst_port, flex_bytes;
@@ -5062,12 +5108,6 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
dst_port = th->dest;
l4type |= IXGBE_ATR_L4TYPE_TCP;
/* l4type IPv4 type is 0, no need to assign */
- } else if(iph->protocol == IPPROTO_UDP) {
- uh = udp_hdr(skb);
- src_port = uh->source;
- dst_port = uh->dest;
- l4type |= IXGBE_ATR_L4TYPE_UDP;
- /* l4type IPv4 type is 0, no need to assign */
} else {
/* Unsupported L4 header, just bail here */
return;
@@ -5471,12 +5511,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_pci_reg;
}
- err = pci_enable_pcie_error_reporting(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
- "0x%x\n", err);
- /* non-fatal, continue */
- }
+ pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
@@ -5785,7 +5820,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int err;
set_bit(__IXGBE_DOWN, &adapter->state);
/* clear the module not found bit to make sure the worker won't
@@ -5836,10 +5870,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
free_netdev(netdev);
- err = pci_disable_pcie_error_reporting(pdev);
- if (err)
- dev_err(&pdev->dev,
- "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+ pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
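One note on the statistics hunk earlier in this file: the 0xF mask implies GORC/GOTC are 36-bit byte counters on the 82599, with the low 32 bits in GORCL/GOTCL and bits 35:32 in the bottom nibble of GORCH/GOTCH, so the high register is now folded into the running 64-bit total rather than being read only to clear it. Restating that arithmetic as a worked relation (nothing assumed beyond what the hunk shows):

	/* total_octets = GORCL | ((GORCH & 0xF) << 32)
	 * and likewise GOTCL/GOTCH for the transmit counter.
	 */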
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 8761d7899f7..ef4bdd58e01 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -49,9 +49,11 @@
#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
#define IXGBE_DEV_ID_82599_KX4 0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
/* General Registers */
#define IXGBE_CTRL 0x00000
@@ -1336,6 +1338,8 @@
#define IXGBE_AUTOC_KX4_SUPP 0x80000000
#define IXGBE_AUTOC_KX_SUPP 0x40000000
#define IXGBE_AUTOC_PAUSE 0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE 0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE 0x10000000
#define IXGBE_AUTOC_RF 0x08000000
#define IXGBE_AUTOC_PD_TMR 0x06000000
#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
@@ -1404,6 +1408,8 @@
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040
+
/* PCS1GLSTA Bit Masks */
#define IXGBE_PCS1GLSTA_LINK_OK 1
#define IXGBE_PCS1GLSTA_SYNK_OK 0x10
@@ -1424,6 +1430,11 @@
#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000
#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE 0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE 0x0400
+#define IXGBE_ANLP1_ASM_PAUSE 0x0800
+
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index b02a981c87a..34a6cfd1793 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters =
}
};
-struct enp2611_ixpdev_priv
-{
- struct ixpdev_priv ixpdev_priv;
- struct net_device_stats stats;
-};
-
static struct net_device *nds[3];
static struct timer_list link_check_timer;
-static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
-{
- struct enp2611_ixpdev_priv *ip = netdev_priv(dev);
-
- pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));
-
- return &(ip->stats);
-}
-
/* @@@ Poll the SFP moddef0 line too. */
/* @@@ Try to use the pm3386 DOOL interrupt as well. */
static void enp2611_check_link_status(unsigned long __dummy)
@@ -203,14 +188,13 @@ static int __init enp2611_init_module(void)
ports = pm3386_port_count();
for (i = 0; i < ports; i++) {
- nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
+ nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
free_netdev(nds[i]);
return -ENOMEM;
}
- nds[i]->get_stats = enp2611_get_stats;
pm3386_init_port(i);
pm3386_get_mac(i, nds[i]->dev_addr);
}
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 127243461a5..9aee0cc922c 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -21,6 +21,7 @@
#include "ixp2400_tx.ucode"
#include "ixpdev_priv.h"
#include "ixpdev.h"
+#include "pm3386.h"
#define DRV_MODULE_VERSION "0.2"
@@ -271,6 +272,15 @@ static int ixpdev_close(struct net_device *dev)
return 0;
}
+static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
+{
+ struct ixpdev_priv *ip = netdev_priv(dev);
+
+ pm3386_get_stats(ip->channel, &(dev->stats));
+
+ return &(dev->stats);
+}
+
static const struct net_device_ops ixpdev_netdev_ops = {
.ndo_open = ixpdev_open,
.ndo_stop = ixpdev_close,
@@ -278,6 +288,7 @@ static const struct net_device_ops ixpdev_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_get_stats = ixpdev_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixpdev_poll_controller,
#endif
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
new file mode 100644
index 00000000000..0be14d702be
--- /dev/null
+++ b/drivers/net/ks8851_mll.c
@@ -0,0 +1,1697 @@
+/**
+ * drivers/net/ks8851_mll.c
+ * Copyright (c) 2009 Micrel Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/**
+ * Supports:
+ * KS8851 16bit MLL chip from Micrel Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/cache.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define DRV_NAME "ks8851_mll"
+
+static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
+#define MAX_RECV_FRAMES 32
+#define MAX_BUF_SIZE 2048
+#define TX_BUF_SIZE 2000
+#define RX_BUF_SIZE 2000
+
+#define KS_CCR 0x08
+#define CCR_EEPROM (1 << 9)
+#define CCR_SPI (1 << 8)
+#define CCR_8BIT (1 << 7)
+#define CCR_16BIT (1 << 6)
+#define CCR_32BIT (1 << 5)
+#define CCR_SHARED (1 << 4)
+#define CCR_32PIN (1 << 0)
+
+/* MAC address registers */
+#define KS_MARL 0x10
+#define KS_MARM 0x12
+#define KS_MARH 0x14
+
+#define KS_OBCR 0x20
+#define OBCR_ODS_16MA (1 << 6)
+
+#define KS_EEPCR 0x22
+#define EEPCR_EESA (1 << 4)
+#define EEPCR_EESB (1 << 3)
+#define EEPCR_EEDO (1 << 2)
+#define EEPCR_EESCK (1 << 1)
+#define EEPCR_EECS (1 << 0)
+
+#define KS_MBIR 0x24
+#define MBIR_TXMBF (1 << 12)
+#define MBIR_TXMBFA (1 << 11)
+#define MBIR_RXMBF (1 << 4)
+#define MBIR_RXMBFA (1 << 3)
+
+#define KS_GRR 0x26
+#define GRR_QMU (1 << 1)
+#define GRR_GSR (1 << 0)
+
+#define KS_WFCR 0x2A
+#define WFCR_MPRXE (1 << 7)
+#define WFCR_WF3E (1 << 3)
+#define WFCR_WF2E (1 << 2)
+#define WFCR_WF1E (1 << 1)
+#define WFCR_WF0E (1 << 0)
+
+#define KS_WF0CRC0 0x30
+#define KS_WF0CRC1 0x32
+#define KS_WF0BM0 0x34
+#define KS_WF0BM1 0x36
+#define KS_WF0BM2 0x38
+#define KS_WF0BM3 0x3A
+
+#define KS_WF1CRC0 0x40
+#define KS_WF1CRC1 0x42
+#define KS_WF1BM0 0x44
+#define KS_WF1BM1 0x46
+#define KS_WF1BM2 0x48
+#define KS_WF1BM3 0x4A
+
+#define KS_WF2CRC0 0x50
+#define KS_WF2CRC1 0x52
+#define KS_WF2BM0 0x54
+#define KS_WF2BM1 0x56
+#define KS_WF2BM2 0x58
+#define KS_WF2BM3 0x5A
+
+#define KS_WF3CRC0 0x60
+#define KS_WF3CRC1 0x62
+#define KS_WF3BM0 0x64
+#define KS_WF3BM1 0x66
+#define KS_WF3BM2 0x68
+#define KS_WF3BM3 0x6A
+
+#define KS_TXCR 0x70
+#define TXCR_TCGICMP (1 << 8)
+#define TXCR_TCGUDP (1 << 7)
+#define TXCR_TCGTCP (1 << 6)
+#define TXCR_TCGIP (1 << 5)
+#define TXCR_FTXQ (1 << 4)
+#define TXCR_TXFCE (1 << 3)
+#define TXCR_TXPE (1 << 2)
+#define TXCR_TXCRC (1 << 1)
+#define TXCR_TXE (1 << 0)
+
+#define KS_TXSR 0x72
+#define TXSR_TXLC (1 << 13)
+#define TXSR_TXMC (1 << 12)
+#define TXSR_TXFID_MASK (0x3f << 0)
+#define TXSR_TXFID_SHIFT (0)
+#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
+
+
+#define KS_RXCR1 0x74
+#define RXCR1_FRXQ (1 << 15)
+#define RXCR1_RXUDPFCC (1 << 14)
+#define RXCR1_RXTCPFCC (1 << 13)
+#define RXCR1_RXIPFCC (1 << 12)
+#define RXCR1_RXPAFMA (1 << 11)
+#define RXCR1_RXFCE (1 << 10)
+#define RXCR1_RXEFE (1 << 9)
+#define RXCR1_RXMAFMA (1 << 8)
+#define RXCR1_RXBE (1 << 7)
+#define RXCR1_RXME (1 << 6)
+#define RXCR1_RXUE (1 << 5)
+#define RXCR1_RXAE (1 << 4)
+#define RXCR1_RXINVF (1 << 1)
+#define RXCR1_RXE (1 << 0)
+#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
+ RXCR1_RXMAFMA | RXCR1_RXPAFMA)
+
+#define KS_RXCR2 0x76
+#define RXCR2_SRDBL_MASK (0x7 << 5)
+#define RXCR2_SRDBL_SHIFT (5)
+#define RXCR2_SRDBL_4B (0x0 << 5)
+#define RXCR2_SRDBL_8B (0x1 << 5)
+#define RXCR2_SRDBL_16B (0x2 << 5)
+#define RXCR2_SRDBL_32B (0x3 << 5)
+/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
+#define RXCR2_IUFFP (1 << 4)
+#define RXCR2_RXIUFCEZ (1 << 3)
+#define RXCR2_UDPLFE (1 << 2)
+#define RXCR2_RXICMPFCC (1 << 1)
+#define RXCR2_RXSAF (1 << 0)
+
+#define KS_TXMIR 0x78
+
+#define KS_RXFHSR 0x7C
+#define RXFSHR_RXFV (1 << 15)
+#define RXFSHR_RXICMPFCS (1 << 13)
+#define RXFSHR_RXIPFCS (1 << 12)
+#define RXFSHR_RXTCPFCS (1 << 11)
+#define RXFSHR_RXUDPFCS (1 << 10)
+#define RXFSHR_RXBF (1 << 7)
+#define RXFSHR_RXMF (1 << 6)
+#define RXFSHR_RXUF (1 << 5)
+#define RXFSHR_RXMR (1 << 4)
+#define RXFSHR_RXFT (1 << 3)
+#define RXFSHR_RXFTL (1 << 2)
+#define RXFSHR_RXRF (1 << 1)
+#define RXFSHR_RXCE (1 << 0)
+#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
+ RXFSHR_RXFTL | RXFSHR_RXMR |\
+ RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
+ RXFSHR_RXTCPFCS)
+#define KS_RXFHBCR 0x7E
+#define RXFHBCR_CNT_MASK 0x0FFF
+
+#define KS_TXQCR 0x80
+#define TXQCR_AETFE (1 << 2)
+#define TXQCR_TXQMAM (1 << 1)
+#define TXQCR_METFE (1 << 0)
+
+#define KS_RXQCR 0x82
+#define RXQCR_RXDTTS (1 << 12)
+#define RXQCR_RXDBCTS (1 << 11)
+#define RXQCR_RXFCTS (1 << 10)
+#define RXQCR_RXIPHTOE (1 << 9)
+#define RXQCR_RXDTTE (1 << 7)
+#define RXQCR_RXDBCTE (1 << 6)
+#define RXQCR_RXFCTE (1 << 5)
+#define RXQCR_ADRFE (1 << 4)
+#define RXQCR_SDA (1 << 3)
+#define RXQCR_RRXEF (1 << 0)
+#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
+
+#define KS_TXFDPR 0x84
+#define TXFDPR_TXFPAI (1 << 14)
+#define TXFDPR_TXFP_MASK (0x7ff << 0)
+#define TXFDPR_TXFP_SHIFT (0)
+
+#define KS_RXFDPR 0x86
+#define RXFDPR_RXFPAI (1 << 14)
+
+#define KS_RXDTTR 0x8C
+#define KS_RXDBCTR 0x8E
+
+#define KS_IER 0x90
+#define KS_ISR 0x92
+#define IRQ_LCI (1 << 15)
+#define IRQ_TXI (1 << 14)
+#define IRQ_RXI (1 << 13)
+#define IRQ_RXOI (1 << 11)
+#define IRQ_TXPSI (1 << 9)
+#define IRQ_RXPSI (1 << 8)
+#define IRQ_TXSAI (1 << 6)
+#define IRQ_RXWFDI (1 << 5)
+#define IRQ_RXMPDI (1 << 4)
+#define IRQ_LDI (1 << 3)
+#define IRQ_EDI (1 << 2)
+#define IRQ_SPIBEI (1 << 1)
+#define IRQ_DEDI (1 << 0)
+
+#define KS_RXFCTR 0x9C
+#define RXFCTR_THRESHOLD_MASK 0x00FF
+
+#define KS_RXFC 0x9D
+#define RXFCTR_RXFC_MASK (0xff << 8)
+#define RXFCTR_RXFC_SHIFT (8)
+#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
+#define RXFCTR_RXFCT_MASK (0xff << 0)
+#define RXFCTR_RXFCT_SHIFT (0)
+
+#define KS_TXNTFSR 0x9E
+
+#define KS_MAHTR0 0xA0
+#define KS_MAHTR1 0xA2
+#define KS_MAHTR2 0xA4
+#define KS_MAHTR3 0xA6
+
+#define KS_FCLWR 0xB0
+#define KS_FCHWR 0xB2
+#define KS_FCOWR 0xB4
+
+#define KS_CIDER 0xC0
+#define CIDER_ID 0x8870
+#define CIDER_REV_MASK (0x7 << 1)
+#define CIDER_REV_SHIFT (1)
+#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
+
+#define KS_CGCR 0xC6
+#define KS_IACR 0xC8
+#define IACR_RDEN (1 << 12)
+#define IACR_TSEL_MASK (0x3 << 10)
+#define IACR_TSEL_SHIFT (10)
+#define IACR_TSEL_MIB (0x3 << 10)
+#define IACR_ADDR_MASK (0x1f << 0)
+#define IACR_ADDR_SHIFT (0)
+
+#define KS_IADLR 0xD0
+#define KS_IAHDR 0xD2
+
+#define KS_PMECR 0xD4
+#define PMECR_PME_DELAY (1 << 14)
+#define PMECR_PME_POL (1 << 12)
+#define PMECR_WOL_WAKEUP (1 << 11)
+#define PMECR_WOL_MAGICPKT (1 << 10)
+#define PMECR_WOL_LINKUP (1 << 9)
+#define PMECR_WOL_ENERGY (1 << 8)
+#define PMECR_AUTO_WAKE_EN (1 << 7)
+#define PMECR_WAKEUP_NORMAL (1 << 6)
+#define PMECR_WKEVT_MASK (0xf << 2)
+#define PMECR_WKEVT_SHIFT (2)
+#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
+#define PMECR_WKEVT_ENERGY (0x1 << 2)
+#define PMECR_WKEVT_LINK (0x2 << 2)
+#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
+#define PMECR_WKEVT_FRAME (0x8 << 2)
+#define PMECR_PM_MASK (0x3 << 0)
+#define PMECR_PM_SHIFT (0)
+#define PMECR_PM_NORMAL (0x0 << 0)
+#define PMECR_PM_ENERGY (0x1 << 0)
+#define PMECR_PM_SOFTDOWN (0x2 << 0)
+#define PMECR_PM_POWERSAVE (0x3 << 0)
+
+/* Standard MII PHY data */
+#define KS_P1MBCR 0xE4
+#define P1MBCR_FORCE_FDX (1 << 8)
+
+#define KS_P1MBSR 0xE6
+#define P1MBSR_AN_COMPLETE (1 << 5)
+#define P1MBSR_AN_CAPABLE (1 << 3)
+#define P1MBSR_LINK_UP (1 << 2)
+
+#define KS_PHY1ILR 0xE8
+#define KS_PHY1IHR 0xEA
+#define KS_P1ANAR 0xEC
+#define KS_P1ANLPR 0xEE
+
+#define KS_P1SCLMD 0xF4
+#define P1SCLMD_LEDOFF (1 << 15)
+#define P1SCLMD_TXIDS (1 << 14)
+#define P1SCLMD_RESTARTAN (1 << 13)
+#define P1SCLMD_DISAUTOMDIX (1 << 10)
+#define P1SCLMD_FORCEMDIX (1 << 9)
+#define P1SCLMD_AUTONEGEN (1 << 7)
+#define P1SCLMD_FORCE100 (1 << 6)
+#define P1SCLMD_FORCEFDX (1 << 5)
+#define P1SCLMD_ADV_FLOW (1 << 4)
+#define P1SCLMD_ADV_100BT_FDX (1 << 3)
+#define P1SCLMD_ADV_100BT_HDX (1 << 2)
+#define P1SCLMD_ADV_10BT_FDX (1 << 1)
+#define P1SCLMD_ADV_10BT_HDX (1 << 0)
+
+#define KS_P1CR 0xF6
+#define P1CR_HP_MDIX (1 << 15)
+#define P1CR_REV_POL (1 << 13)
+#define P1CR_OP_100M (1 << 10)
+#define P1CR_OP_FDX (1 << 9)
+#define P1CR_OP_MDI (1 << 7)
+#define P1CR_AN_DONE (1 << 6)
+#define P1CR_LINK_GOOD (1 << 5)
+#define P1CR_PNTR_FLOW (1 << 4)
+#define P1CR_PNTR_100BT_FDX (1 << 3)
+#define P1CR_PNTR_100BT_HDX (1 << 2)
+#define P1CR_PNTR_10BT_FDX (1 << 1)
+#define P1CR_PNTR_10BT_HDX (1 << 0)
+
+/* TX Frame control */
+
+#define TXFR_TXIC (1 << 15)
+#define TXFR_TXFID_MASK (0x3f << 0)
+#define TXFR_TXFID_SHIFT (0)
+
+#define KS_P1SR 0xF8
+#define P1SR_HP_MDIX (1 << 15)
+#define P1SR_REV_POL (1 << 13)
+#define P1SR_OP_100M (1 << 10)
+#define P1SR_OP_FDX (1 << 9)
+#define P1SR_OP_MDI (1 << 7)
+#define P1SR_AN_DONE (1 << 6)
+#define P1SR_LINK_GOOD (1 << 5)
+#define P1SR_PNTR_FLOW (1 << 4)
+#define P1SR_PNTR_100BT_FDX (1 << 3)
+#define P1SR_PNTR_100BT_HDX (1 << 2)
+#define P1SR_PNTR_10BT_FDX (1 << 1)
+#define P1SR_PNTR_10BT_HDX (1 << 0)
+
+#define ENUM_BUS_NONE 0
+#define ENUM_BUS_8BIT 1
+#define ENUM_BUS_16BIT 2
+#define ENUM_BUS_32BIT 3
+
+#define MAX_MCAST_LST 32
+#define HW_MCAST_SIZE 8
+#define MAC_ADDR_LEN 6
+
+/**
+ * union ks_tx_hdr - tx header data
+ * @txb: The header as bytes
+ * @txw: The header as 16bit, little-endian words
+ *
+ * A dual representation of the tx header data to allow
+ * access to individual bytes, and to allow 16bit accesses
+ * with 16bit alignment.
+ */
+union ks_tx_hdr {
+ u8 txb[4];
+ __le16 txw[2];
+};
+
+/**
+ * struct ks_net - KS8851 driver private data
+ * @net_device : The network device we're bound to
+ * @hw_addr : start address of data register.
+ * @hw_addr_cmd : start address of command register.
+ * @txh : temporary buffer to save status/length.
+ * @lock : Lock to ensure that the device is not accessed when busy.
+ * @pdev : Pointer to platform device.
+ * @mii : The MII state information for the mii calls.
+ * @frame_head_info : frame header information for multi-pkt rx.
+ * @statelock : Lock on this structure for tx list.
+ * @msg_enable : The message flags controlling driver output (see ethtool).
+ * @frame_cnt : number of frames received.
+ * @bus_width : i/o bus width.
+ * @irq : irq number assigned to this device.
+ * @rc_rxqcr : Cached copy of KS_RXQCR.
+ * @rc_txcr : Cached copy of KS_TXCR.
+ * @rc_ier : Cached copy of KS_IER.
+ * @sharedbus : Multiplexed (addr and data bus) mode indicator.
+ * @cmd_reg_cache : command register cached.
+ * @cmd_reg_cache_int : command register cached. Used in the irq handler.
+ * @promiscuous : promiscuous mode indicator.
+ * @all_mcast : all-multicast mode indicator.
+ * @mcast_lst_size : size of multicast list.
+ * @mcast_lst : multicast list.
+ * @mcast_bits : multicast hash-table bits.
+ * @mac_addr : MAC address assigned to this device.
+ * @fid : frame id.
+ * @extra_byte : number of extra bytes prepended to an rx pkt.
+ * @enabled : indicates whether the device is enabled.
+ *
+ * The @lock ensures that the chip is protected when certain operations are
+ * in progress. When the read or write packet transfer is in progress, most
+ * of the chip registers are not accessible until the transfer is finished and
+ * the DMA has been de-asserted.
+ *
+ * The @statelock is used to protect information in the structure which may
+ * need to be accessed via several sources, such as the network driver layer
+ * or one of the work queues.
+ *
+ */
+
+/* Receive multiplex framer header info */
+struct type_frame_head {
+ u16 sts; /* Frame status */
+ u16 len; /* Byte count */
+};
+
+struct ks_net {
+ struct net_device *netdev;
+ void __iomem *hw_addr;
+ void __iomem *hw_addr_cmd;
+ union ks_tx_hdr txh ____cacheline_aligned;
+ struct mutex lock; /* mutex to ensure the device is not accessed when busy */
+ struct platform_device *pdev;
+ struct mii_if_info mii;
+ struct type_frame_head *frame_head_info;
+ spinlock_t statelock;
+ u32 msg_enable;
+ u32 frame_cnt;
+ int bus_width;
+ int irq;
+
+ u16 rc_rxqcr;
+ u16 rc_txcr;
+ u16 rc_ier;
+ u16 sharedbus;
+ u16 cmd_reg_cache;
+ u16 cmd_reg_cache_int;
+ u16 promiscuous;
+ u16 all_mcast;
+ u16 mcast_lst_size;
+ u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN];
+ u8 mcast_bits[HW_MCAST_SIZE];
+ u8 mac_addr[6];
+ u8 fid;
+ u8 extra_byte;
+ u8 enabled;
+};
+
+static int msg_enable;
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
+
+#define BE3 0x8000 /* Byte Enable 3 */
+#define BE2 0x4000 /* Byte Enable 2 */
+#define BE1 0x2000 /* Byte Enable 1 */
+#define BE0 0x1000 /* Byte Enable 0 */
+
+/**
+ * register read/write calls.
+ *
+ * All these calls issue transactions to access the chip's registers. They
+ * all require that the necessary lock is held to prevent accesses when the
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
+ */
+
+/**
+ * ks_rdreg8 - read 8 bit register from device
+ * @ks : The chip information
+ * @offset: The register address
+ *
+ * Read an 8-bit register from the chip, returning the result
+ */
+static u8 ks_rdreg8(struct ks_net *ks, int offset)
+{
+ u16 data;
+ u8 shift_bit = offset & 0x03;
+ u8 shift_data = (offset & 1) << 3;
+ ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ data = ioread16(ks->hw_addr);
+ return (u8)(data >> shift_data);
+}
+
+/**
+ * ks_rdreg16 - read 16 bit register from device
+ * @ks : The chip information
+ * @offset: The register address
+ *
+ * Read a 16-bit register from the chip, returning the result
+ */
+
+static u16 ks_rdreg16(struct ks_net *ks, int offset)
+{
+ ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ return ioread16(ks->hw_addr);
+}
+
+/**
+ * ks_wrreg8 - write 8bit register value to chip
+ * @ks: The chip information
+ * @offset: The register address
+ * @value: The value to write
+ *
+ */
+static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+{
+ u8 shift_bit = (offset & 0x03);
+ u16 value_write = (u16)(value << ((offset & 1) << 3));
+ ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ iowrite16(value_write, ks->hw_addr);
+}
+
+/**
+ * ks_wrreg16 - write 16bit register value to chip
+ * @ks: The chip information
+ * @offset: The register address
+ * @value: The value to write
+ *
+ */
+
+static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
+{
+ ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+ iowrite16(value, ks->hw_addr);
+}
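All four register accessors above share one command-word scheme: the register offset sits in the low bits, and the byte-enable bits BE0..BE3 select which bytes of the 32-bit register window a 16-bit bus access actually touches. A few worked values using only the definitions already in this file:

	/* 16-bit read of KS_CCR (0x08): (0x08 & 0x02) == 0
	 *   cmd = 0x08 | ((BE1 | BE0) << 0) = 0x3008  -> low word enabled
	 * 16-bit read of a high word such as 0x0A: (0x0A & 0x02) == 2
	 *   cmd = 0x0A | ((BE1 | BE0) << 2) = 0xC00A  -> high word enabled
	 * 8-bit read of KS_CCR + 1 (0x09): BE0 << (0x09 & 0x03) == BE1
	 *   cmd = 0x2009, and the byte comes from bits 15:8 of the data word
	 */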
+
+/**
+ * ks_inblk - read a block of data from the QMU. This is called after pseudo-DMA mode is enabled.
+ * @ks: The chip state
+ * @wptr: buffer address to save data
+ * @len: length in byte to read
+ *
+ */
+static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
+{
+ len >>= 1;
+ while (len--)
+ *wptr++ = (u16)ioread16(ks->hw_addr);
+}
+
+/**
+ * ks_outblk - write data to the QMU. This is called after pseudo-DMA mode is enabled.
+ * @ks: The chip information
+ * @wptr: buffer address
+ * @len: length in byte to write
+ *
+ */
+static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
+{
+ len >>= 1;
+ while (len--)
+ iowrite16(*wptr++, ks->hw_addr);
+}
+
+/**
+ * ks_tx_fifo_space - return the available hardware buffer size.
+ * @ks: The chip information
+ *
+ */
+static inline u16 ks_tx_fifo_space(struct ks_net *ks)
+{
+ return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
+}
+
+/**
+ * ks_save_cmd_reg - save the cached command register.
+ * @ks: The chip information
+ *
+ */
+static inline void ks_save_cmd_reg(struct ks_net *ks)
+{
+ /* The KS8851 MLL cannot reliably read back the command register,
+ * so software keeps a cached copy of its contents instead.
+ */
+ ks->cmd_reg_cache_int = ks->cmd_reg_cache;
+}
+
+/**
+ * ks_restore_cmd_reg - restore the command register from the cache and
+ * write to hardware register.
+ * @ks: The chip information
+ *
+ */
+static inline void ks_restore_cmd_reg(struct ks_net *ks)
+{
+ ks->cmd_reg_cache = ks->cmd_reg_cache_int;
+ iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+}
+
+/**
+ * ks_set_powermode - set power mode of the device
+ * @ks: The chip information
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ if (netif_msg_hw(ks))
+ ks_dbg(ks, "setting power mode %d\n", pwrmode);
+
+ ks_rdreg16(ks, KS_GRR);
+ pmecr = ks_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
+ * ks_read_config - read the chip's bus-width configuration.
+ * @ks: The chip information
+ *
+ */
+static void ks_read_config(struct ks_net *ks)
+{
+ u16 reg_data = 0;
+
+ /* Regardless of bus width, 8-bit reads should always work. */
+ reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
+ reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
+
+ /* addr/data bus are multiplexed */
+ ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
+
+ /* Depending on the bus width, garbage bytes precede the frame
+ * data read from the QMU.
+ */
+
+ if (reg_data & CCR_8BIT) {
+ ks->bus_width = ENUM_BUS_8BIT;
+ ks->extra_byte = 1;
+ } else if (reg_data & CCR_16BIT) {
+ ks->bus_width = ENUM_BUS_16BIT;
+ ks->extra_byte = 2;
+ } else {
+ ks->bus_width = ENUM_BUS_32BIT;
+ ks->extra_byte = 4;
+ }
+}
+
+/**
+ * ks_soft_reset - issue one of the soft resets to the device
+ * @ks: The device state.
+ * @op: The bit(s) to set in the GRR
+ *
+ * Issue the relevant soft-reset command to the device's GRR register
+ * specified by @op.
+ *
+ * Note, the delays are in there as a caution to ensure that the reset
+ * has time to take effect and then complete. Since the datasheet does
+ * not currently specify the exact sequence, we have chosen something
+ * that seems to work with our device.
+ */
+static void ks_soft_reset(struct ks_net *ks, unsigned op)
+{
+ /* Disable interrupt first */
+ ks_wrreg16(ks, KS_IER, 0x0000);
+ ks_wrreg16(ks, KS_GRR, op);
+ mdelay(10); /* wait a short time to effect reset */
+ ks_wrreg16(ks, KS_GRR, 0);
+ mdelay(1); /* wait for condition to clear */
+}
+
+
+/**
+ * ks_read_qmu - read 1 pkt data from the QMU.
+ * @ks: The chip information
+ * @buf: buffer address to save 1 pkt
+ * @len: Pkt length
+ * Here is the sequence to read 1 pkt:
+ * 1. set pseudo-DMA mode
+ * 2. read prepend data
+ * 3. read pkt data
+ * 4. reset pseudo-DMA mode
+ */
+static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
+{
+ u32 r = ks->extra_byte & 0x1 ;
+ u32 w = ks->extra_byte - r;
+
+ /* 1. set pseudo-DMA mode */
+ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+ ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+
+ /* 2. read prepend data */
+ /**
+ * read 4 + extra bytes and discard them.
+ * extra bytes for dummy, 2 for status, 2 for len
+ */
+
+ /* an odd extra byte, if present, is discarded with a single 8-bit read */
+ if (unlikely(r))
+ ioread8(ks->hw_addr);
+ ks_inblk(ks, buf, w + 2 + 2);
+
+ /* 3. read pkt data */
+ ks_inblk(ks, buf, ALIGN(len, 4));
+
+ /* 4. reset pseudo-DMA mode */
+ ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+}
+
+/**
+ * ks_rcv - read multiple pkts data from the QMU.
+ * @ks: The chip information
+ * @netdev: The network device being opened.
+ *
+ * Read all of the header information before reading the pkt content;
+ * the QMU does not allow reading only part of the queued pkts after
+ * the interrupt has been acknowledged.
+ */
+static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
+{
+ u32 i;
+ struct type_frame_head *frame_hdr = ks->frame_head_info;
+ struct sk_buff *skb;
+
+ ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;
+
+ /* read all header information */
+ for (i = 0; i < ks->frame_cnt; i++) {
+ /* Checking Received packet status */
+ frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
+ /* Get packet len from hardware */
+ frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
+ frame_hdr++;
+ }
+
+ frame_hdr = ks->frame_head_info;
+ while (ks->frame_cnt--) {
+ skb = dev_alloc_skb(frame_hdr->len + 16);
+ if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
+ (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+ skb_reserve(skb, 2);
+ /* read data block including CRC 4 bytes */
+ ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
+ skb_put(skb, frame_hdr->len);
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ } else {
+ printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+ ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+ if (skb)
+ dev_kfree_skb_irq(skb);
+ }
+ frame_hdr++;
+ }
+}
+
+/**
+ * ks_update_link_status - link status update.
+ * @netdev: The network device being opened.
+ * @ks: The chip information
+ *
+ */
+
+static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
+{
+ /* check the status of the link */
+ u32 link_up_status;
+ if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
+ netif_carrier_on(netdev);
+ link_up_status = true;
+ } else {
+ netif_carrier_off(netdev);
+ link_up_status = false;
+ }
+ if (netif_msg_link(ks))
+ ks_dbg(ks, "%s: %s\n",
+ __func__, link_up_status ? "UP" : "DOWN");
+}
+
+/**
+ * ks_irq - device interrupt handler
+ * @irq: Interrupt number that triggered the handler.
+ * @pw: The private word passed to request_irq(), our struct ks_net.
+ *
+ * This is the handler invoked to find out what happened
+ *
+ * Read the interrupt status, work out what needs to be done and then clear
+ * any of the interrupts that are not needed.
+ */
+
+static irqreturn_t ks_irq(int irq, void *pw)
+{
+ struct ks_net *ks = pw;
+ struct net_device *netdev = ks->netdev;
+ u16 status;
+
+ /* this should be done first in the IRQ handler */
+ ks_save_cmd_reg(ks);
+
+ status = ks_rdreg16(ks, KS_ISR);
+ if (unlikely(!status)) {
+ ks_restore_cmd_reg(ks);
+ return IRQ_NONE;
+ }
+
+ ks_wrreg16(ks, KS_ISR, status);
+
+ if (likely(status & IRQ_RXI))
+ ks_rcv(ks, netdev);
+
+ if (unlikely(status & IRQ_LCI))
+ ks_update_link_status(netdev, ks);
+
+ if (unlikely(status & IRQ_TXI))
+ netif_wake_queue(netdev);
+
+ if (unlikely(status & IRQ_LDI)) {
+
+ u16 pmecr = ks_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_WKEVT_MASK;
+ ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
+ }
+
+ /* this should be the last in IRQ handler*/
+ ks_restore_cmd_reg(ks);
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * ks_net_open - open network device
+ * @netdev: The network device being opened.
+ *
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device.
+ */
+static int ks_net_open(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ int err;
+
+#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
+ /* lock the card, even if we may not actually do anything
+ * else at the moment.
+ */
+
+ if (netif_msg_ifup(ks))
+ ks_dbg(ks, "%s - entry\n", __func__);
+
+ /* request the device's IRQ line */
+ err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
+
+ if (err) {
+ printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
+ ks->irq, err);
+ return err;
+ }
+
+ if (netif_msg_ifup(ks))
+ ks_dbg(ks, "network device %s up\n", netdev->name);
+
+ return 0;
+}
+
+/**
+ * ks_net_stop - close network device
+ * @netdev: The device being closed.
+ *
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state whilst it is not being used.
+ */
+static int ks_net_stop(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+
+ if (netif_msg_ifdown(ks))
+ ks_info(ks, "%s: shutting down\n", netdev->name);
+
+ netif_stop_queue(netdev);
+
+ kfree(ks->frame_head_info);
+
+ mutex_lock(&ks->lock);
+
+ /* turn off the IRQs and ack any outstanding */
+ ks_wrreg16(ks, KS_IER, 0x0000);
+ ks_wrreg16(ks, KS_ISR, 0xffff);
+
+ /* shutdown RX process */
+ ks_wrreg16(ks, KS_RXCR1, 0x0000);
+
+ /* shutdown TX process */
+ ks_wrreg16(ks, KS_TXCR, 0x0000);
+
+ /* set powermode to soft power down to save power */
+ ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
+ free_irq(ks->irq, netdev);
+ mutex_unlock(&ks->lock);
+ return 0;
+}
+
+
+/**
+ * ks_write_qmu - write 1 pkt data to the QMU.
+ * @ks: The chip information
+ * @pdata: buffer address to save 1 pkt
+ * @len: Pkt length in bytes
+ * Here is the sequence to write 1 pkt:
+ * 1. set pseudo-DMA mode
+ * 2. write status/length
+ * 3. write pkt data
+ * 4. reset pseudo-DMA mode
+ * 5. enqueue the pkt (set TXQCR_METFE)
+ * 6. wait until the pkt is out (METFE auto-cleared)
+ */
+static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
+{
+ unsigned fid = ks->fid;
+
+ fid = ks->fid;
+ ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
+
+ /* reduce the number of tx interrupt occurrences. */
+ if (!fid)
+ fid |= TXFR_TXIC; /* irq on completion */
+
+ /* start header at txb[0] to align txw entries */
+ ks->txh.txw[0] = cpu_to_le16(fid);
+ ks->txh.txw[1] = cpu_to_le16(len);
+
+ /* 1. set pseudo-DMA mode */
+ ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+ /* 2. write status/length info */
+ ks_outblk(ks, ks->txh.txw, 4);
+ /* 3. write pkt data */
+ ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
+ /* 4. reset pseudo-DMA mode */
+ ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+ /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
+ ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+ /* 6. wait until TXQCR_METFE is auto-cleared */
+ while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
+ ;
+}
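To make the 4-byte control header that ks_write_qmu() pushes ahead of each frame concrete, here is one worked example; the frame id and length are made up purely for illustration:

	/* 60-byte frame, frame id 5, no interrupt-on-completion bit:
	 *   txh.txw[0] = cpu_to_le16(5)    -> frame ID / control word
	 *   txh.txw[1] = cpu_to_le16(60)   -> byte count
	 * followed by ALIGN(60, 4) = 60 bytes of frame data via ks_outblk().
	 */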
+
+static void ks_disable_int(struct ks_net *ks)
+{
+ ks_wrreg16(ks, KS_IER, 0x0000);
+} /* ks_disable_int */
+
+static void ks_enable_int(struct ks_net *ks)
+{
+ ks_wrreg16(ks, KS_IER, ks->rc_ier);
+} /* ks_enable_int */
+
+/**
+ * ks_start_xmit - transmit packet
+ * @skb : The buffer to transmit
+ * @netdev : The device used to transmit the packet.
+ *
+ * Called by the network layer to transmit the @skb.
+ * A lock is required because tx and rx must be mutually exclusive,
+ * so the IRQ is kept from happening while tx is in progress.
+ */
+static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int retv = NETDEV_TX_OK;
+ struct ks_net *ks = netdev_priv(netdev);
+
+ disable_irq(netdev->irq);
+ ks_disable_int(ks);
+ spin_lock(&ks->statelock);
+
+ /* Extra space is required:
+ * 4 bytes for alignment, 4 for status/length, 4 for CRC
+ */
+
+ if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
+ ks_write_qmu(ks, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ } else
+ retv = NETDEV_TX_BUSY;
+ spin_unlock(&ks->statelock);
+ ks_enable_int(ks);
+ enable_irq(netdev->irq);
+ return retv;
+}
+
+/**
+ * ks_start_rx - ready to serve pkts
+ * @ks : The chip information
+ *
+ */
+static void ks_start_rx(struct ks_net *ks)
+{
+ u16 cntl;
+
+ /* Enables QMU Receive (RXCR1). */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+ cntl |= RXCR1_RXE ;
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+} /* ks_start_rx */
+
+/**
+ * ks_stop_rx - stop serving pkts
+ * @ks : The chip information
+ *
+ */
+static void ks_stop_rx(struct ks_net *ks)
+{
+ u16 cntl;
+
+ /* Disables QMU Receive (RXCR1). */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+ cntl &= ~RXCR1_RXE ;
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+
+} /* ks_stop_rx */
+
+static unsigned long const ethernet_polynomial = 0x04c11db7U;
+
+static unsigned long ether_gen_crc(int length, u8 *data)
+{
+ long crc = -1;
+ while (--length >= 0) {
+ u8 current_octet = *data++;
+ int bit;
+
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ?
+ ethernet_polynomial : 0);
+ }
+ }
+ return (unsigned long)crc;
+} /* ether_gen_crc */
+
+/**
+* ks_set_grpaddr - set multicast information
+* @ks : The chip information
+*/
+
+static void ks_set_grpaddr(struct ks_net *ks)
+{
+ u8 i;
+ u32 index, position, value;
+
+ memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);
+
+ for (i = 0; i < ks->mcast_lst_size; i++) {
+ position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ ks->mcast_bits[index] |= (u8)value;
+ }
+
+ for (i = 0; i < HW_MCAST_SIZE; i++) {
+ if (i & 1) {
+ ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
+ (ks->mcast_bits[i] << 8) |
+ ks->mcast_bits[i - 1]);
+ }
+ }
+} /* ks_set_grpaddr */
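A worked example of the hash slicing done in ks_set_grpaddr() above; the CRC value is invented purely to show how a 6-bit hash position becomes a byte index and a bit mask:

	/* If the top six bits of ether_gen_crc() are 0b101101 (45):
	 *   position = 45
	 *   index    = 45 >> 3       = 5      -> mcast_bits[5]
	 *   value    = 1 << (45 & 7) = 0x20
	 * Byte pairs of mcast_bits[] are then written 16 bits at a time
	 * into KS_MAHTR0..KS_MAHTR3.
	 */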
+
+/*
+* ks_clear_mcast - clear multicast information
+*
+* @ks : The chip information
+* This routine removes all mcast addresses set in the hardware.
+*/
+
+static void ks_clear_mcast(struct ks_net *ks)
+{
+ u16 i, mcast_size;
+ for (i = 0; i < HW_MCAST_SIZE; i++)
+ ks->mcast_bits[i] = 0;
+
+ mcast_size = HW_MCAST_SIZE >> 2;
+ for (i = 0; i < mcast_size; i++)
+ ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
+}
+
+static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
+{
+ u16 cntl;
+ ks->promiscuous = promiscuous_mode;
+ ks_stop_rx(ks); /* Stop receiving for reconfiguration */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+
+ cntl &= ~RXCR1_FILTER_MASK;
+ if (promiscuous_mode)
+ /* Enable Promiscuous mode */
+ cntl |= RXCR1_RXAE | RXCR1_RXINVF;
+ else
+ /* Disable Promiscuous mode (default normal mode) */
+ cntl |= RXCR1_RXPAFMA;
+
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+
+ if (ks->enabled)
+ ks_start_rx(ks);
+
+} /* ks_set_promis */
+
+static void ks_set_mcast(struct ks_net *ks, u16 mcast)
+{
+ u16 cntl;
+
+ ks->all_mcast = mcast;
+ ks_stop_rx(ks); /* Stop receiving for reconfiguration */
+ cntl = ks_rdreg16(ks, KS_RXCR1);
+ cntl &= ~RXCR1_FILTER_MASK;
+ if (mcast)
+ /* Enable "Perfect with Multicast address passed mode" */
+ cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
+ else
+ /**
+ * Disable "Perfect with Multicast address passed
+ * mode" (normal mode).
+ */
+ cntl |= RXCR1_RXPAFMA;
+
+ ks_wrreg16(ks, KS_RXCR1, cntl);
+
+ if (ks->enabled)
+ ks_start_rx(ks);
+} /* ks_set_mcast */
+
+static void ks_set_rx_mode(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ struct dev_mc_list *ptr;
+
+ /* Turn on/off promiscuous mode. */
+ if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
+ ks_set_promis(ks,
+ (u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
+ /* Turn on/off all mcast mode. */
+ else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
+ ks_set_mcast(ks,
+ (u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
+ else
+ ks_set_promis(ks, false);
+
+ if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
+ if (netdev->mc_count <= MAX_MCAST_LST) {
+ int i = 0;
+ for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
+ if (!(*ptr->dmi_addr & 1))
+ continue;
+ if (i >= MAX_MCAST_LST)
+ break;
+ memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
+ MAC_ADDR_LEN);
+ }
+ ks->mcast_lst_size = (u8)i;
+ ks_set_grpaddr(ks);
+ } else {
+ /**
+ * List too big to support so
+ * turn on all mcast mode.
+ */
+ ks->mcast_lst_size = MAX_MCAST_LST;
+ ks_set_mcast(ks, true);
+ }
+ } else {
+ ks->mcast_lst_size = 0;
+ ks_clear_mcast(ks);
+ }
+} /* ks_set_rx_mode */
+
+static void ks_set_mac(struct ks_net *ks, u8 *data)
+{
+ u16 *pw = (u16 *)data;
+ u16 w, u;
+
+ ks_stop_rx(ks); /* Stop receiving for reconfiguration */
+
+ u = *pw++;
+ w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+ ks_wrreg16(ks, KS_MARH, w);
+
+ u = *pw++;
+ w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+ ks_wrreg16(ks, KS_MARM, w);
+
+ u = *pw;
+ w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
+ ks_wrreg16(ks, KS_MARL, w);
+
+ memcpy(ks->mac_addr, data, 6);
+
+ if (ks->enabled)
+ ks_start_rx(ks);
+}
+
+static int ks_set_mac_address(struct net_device *netdev, void *paddr)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ struct sockaddr *addr = paddr;
+ u8 *da;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ da = (u8 *)netdev->dev_addr;
+
+ ks_set_mac(ks, da);
+ return 0;
+}
+
+static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
+}
+
+static const struct net_device_ops ks_netdev_ops = {
+ .ndo_open = ks_net_open,
+ .ndo_stop = ks_net_stop,
+ .ndo_do_ioctl = ks_net_ioctl,
+ .ndo_start_xmit = ks_start_xmit,
+ .ndo_set_mac_address = ks_set_mac_address,
+ .ndo_set_rx_mode = ks_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/* ethtool support */
+
+static void ks_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *di)
+{
+ strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
+ strlcpy(di->version, "1.00", sizeof(di->version));
+ strlcpy(di->bus_info, dev_name(netdev->dev.parent),
+ sizeof(di->bus_info));
+}
+
+static u32 ks_get_msglevel(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return ks->msg_enable;
+}
+
+static void ks_set_msglevel(struct net_device *netdev, u32 to)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ ks->msg_enable = to;
+}
+
+static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_ethtool_gset(&ks->mii, cmd);
+}
+
+static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_ethtool_sset(&ks->mii, cmd);
+}
+
+static u32 ks_get_link(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_link_ok(&ks->mii);
+}
+
+static int ks_nway_reset(struct net_device *netdev)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ return mii_nway_restart(&ks->mii);
+}
+
+static const struct ethtool_ops ks_ethtool_ops = {
+ .get_drvinfo = ks_get_drvinfo,
+ .get_msglevel = ks_get_msglevel,
+ .set_msglevel = ks_set_msglevel,
+ .get_settings = ks_get_settings,
+ .set_settings = ks_set_settings,
+ .get_link = ks_get_link,
+ .nway_reset = ks_nway_reset,
+};
+
+/* MII interface controls */
+
+/**
+ * ks_phy_reg - convert MII register into a KS8851 register
+ * @reg: MII register number.
+ *
+ * Return the KS8851 register number for the corresponding MII PHY register
+ * if possible. Return zero if the MII register has no direct mapping to the
+ * KS8851 register set.
+ */
+static int ks_phy_reg(int reg)
+{
+ switch (reg) {
+ case MII_BMCR:
+ return KS_P1MBCR;
+ case MII_BMSR:
+ return KS_P1MBSR;
+ case MII_PHYSID1:
+ return KS_PHY1ILR;
+ case MII_PHYSID2:
+ return KS_PHY1IHR;
+ case MII_ADVERTISE:
+ return KS_P1ANAR;
+ case MII_LPA:
+ return KS_P1ANLPR;
+ }
+
+ return 0x0;
+}
+
+/**
+ * ks_phy_read - MII interface PHY register read.
+ * @netdev: The network device the PHY is on.
+ * @phy_addr: Address of PHY (ignored as we only have one)
+ * @reg: The register to read.
+ *
+ * This call reads data from the PHY register specified in @reg. Since the
+ * device does not support all the MII registers, the non-existent values
+ * are always returned as zero.
+ *
+ * We return zero for unsupported registers as the MII code does not check
+ * the value returned for any error status, and simply returns it to the
+ * caller. The mii-tool that the driver was tested with takes any negative error
+ * as real PHY capabilities, thus displaying incorrect data to the user.
+ */
+static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ int ksreg;
+ int result;
+
+ ksreg = ks_phy_reg(reg);
+ if (!ksreg)
+ return 0x0; /* no error return allowed, so use zero */
+
+ mutex_lock(&ks->lock);
+ result = ks_rdreg16(ks, ksreg);
+ mutex_unlock(&ks->lock);
+
+ return result;
+}
+
+static void ks_phy_write(struct net_device *netdev,
+ int phy, int reg, int value)
+{
+ struct ks_net *ks = netdev_priv(netdev);
+ int ksreg;
+
+ ksreg = ks_phy_reg(reg);
+ if (ksreg) {
+ mutex_lock(&ks->lock);
+ ks_wrreg16(ks, ksreg, value);
+ mutex_unlock(&ks->lock);
+ }
+}
+
+/**
+ * ks_read_selftest - read the selftest memory info.
+ * @ks: The device state
+ *
+ * Read and check the TX/RX memory selftest information.
+ */
+static int ks_read_selftest(struct ks_net *ks)
+{
+ unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
+ int ret = 0;
+ unsigned rd;
+
+ rd = ks_rdreg16(ks, KS_MBIR);
+
+ if ((rd & both_done) != both_done) {
+ ks_warn(ks, "Memory selftest not finished\n");
+ return 0;
+ }
+
+ if (rd & MBIR_TXMBFA) {
+ ks_err(ks, "TX memory selftest fails\n");
+ ret |= 1;
+ }
+
+ if (rd & MBIR_RXMBFA) {
+ ks_err(ks, "RX memory selftest fails\n");
+ ret |= 2;
+ }
+
+ ks_info(ks, "the selftest passes\n");
+ return ret;
+}
+
+static void ks_disable(struct ks_net *ks)
+{
+ u16 w;
+
+ w = ks_rdreg16(ks, KS_TXCR);
+
+ /* Disables QMU Transmit (TXCR). */
+ w &= ~TXCR_TXE;
+ ks_wrreg16(ks, KS_TXCR, w);
+
+ /* Disables QMU Receive (RXCR1). */
+ w = ks_rdreg16(ks, KS_RXCR1);
+ w &= ~RXCR1_RXE ;
+ ks_wrreg16(ks, KS_RXCR1, w);
+
+ ks->enabled = false;
+
+} /* ks_disable */
+
+static void ks_setup(struct ks_net *ks)
+{
+ u16 w;
+
+ /**
+ * Configure QMU Transmit
+ */
+
+ /* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
+ ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);
+
+ /* Setup Receive Frame Data Pointer Auto-Increment */
+ ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+
+ /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
+ ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+
+ /* Setup RxQ Command Control (RXQCR) */
+ ks->rc_rxqcr = RXQCR_CMD_CNTL;
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+
+ /**
+ * Force half duplex (the default is full duplex), because most
+ * switches fall back to half duplex if auto-negotiation fails.
+ */
+
+ w = ks_rdreg16(ks, KS_P1MBCR);
+ w &= ~P1MBCR_FORCE_FDX;
+ ks_wrreg16(ks, KS_P1MBCR, w);
+
+ w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
+ ks_wrreg16(ks, KS_TXCR, w);
+
+ w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
+
+ if (ks->promiscuous) /* bPromiscuous */
+ w |= (RXCR1_RXAE | RXCR1_RXINVF);
+ else if (ks->all_mcast) /* Multicast address passed mode */
+ w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
+ else /* Normal mode */
+ w |= RXCR1_RXPAFMA;
+
+ ks_wrreg16(ks, KS_RXCR1, w);
+} /*ks_setup */
+
+
+static void ks_setup_int(struct ks_net *ks)
+{
+ ks->rc_ier = 0x00;
+ /* Clear the interrupts status of the hardware. */
+ ks_wrreg16(ks, KS_ISR, 0xffff);
+
+ /* Enables the interrupts of the hardware. */
+ ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
+} /* ks_setup_int */
+
+void ks_enable(struct ks_net *ks)
+{
+ u16 w;
+
+ w = ks_rdreg16(ks, KS_TXCR);
+ /* Enables QMU Transmit (TXCR). */
+ ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
+
+ /*
+ * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
+ * Enable
+ */
+
+ w = ks_rdreg16(ks, KS_RXQCR);
+ ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
+
+ /* Enables QMU Receive (RXCR1). */
+ w = ks_rdreg16(ks, KS_RXCR1);
+ ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
+ ks->enabled = true;
+} /* ks_enable */
+
+static int ks_hw_init(struct ks_net *ks)
+{
+#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
+ ks->promiscuous = 0;
+ ks->all_mcast = 0;
+ ks->mcast_lst_size = 0;
+
+ ks->frame_head_info = (struct type_frame_head *) \
+ kmalloc(MHEADER_SIZE, GFP_KERNEL);
+ if (!ks->frame_head_info) {
+ printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+ return false;
+ }
+
+ ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
+ return true;
+}
+
+
+static int __devinit ks8851_probe(struct platform_device *pdev)
+{
+ int err = -ENOMEM;
+ struct resource *io_d, *io_c;
+ struct net_device *netdev;
+ struct ks_net *ks;
+ u16 id, data;
+
+ io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
+ goto err_mem_region;
+
+ if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
+ goto err_mem_region1;
+
+ netdev = alloc_etherdev(sizeof(struct ks_net));
+ if (!netdev)
+ goto err_alloc_etherdev;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ ks = netdev_priv(netdev);
+ ks->netdev = netdev;
+ ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
+
+ if (!ks->hw_addr)
+ goto err_ioremap;
+
+ ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
+ if (!ks->hw_addr_cmd)
+ goto err_ioremap1;
+
+ ks->irq = platform_get_irq(pdev, 0);
+
+ if (ks->irq < 0) {
+ err = ks->irq;
+ goto err_get_irq;
+ }
+
+ ks->pdev = pdev;
+
+ mutex_init(&ks->lock);
+ spin_lock_init(&ks->statelock);
+
+ netdev->netdev_ops = &ks_netdev_ops;
+ netdev->ethtool_ops = &ks_ethtool_ops;
+
+ /* setup mii state */
+ ks->mii.dev = netdev;
+ ks->mii.phy_id = 1,
+ ks->mii.phy_id_mask = 1;
+ ks->mii.reg_num_mask = 0xf;
+ ks->mii.mdio_read = ks_phy_read;
+ ks->mii.mdio_write = ks_phy_write;
+
+ ks_info(ks, "message enable is %d\n", msg_enable);
+ /* set the default message enable */
+ ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
+ NETIF_MSG_PROBE |
+ NETIF_MSG_LINK));
+ ks_read_config(ks);
+
+ /* simple check for a valid chip being connected to the bus */
+ if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+ ks_err(ks, "failed to read device ID\n");
+ err = -ENODEV;
+ goto err_register;
+ }
+
+ if (ks_read_selftest(ks)) {
+ ks_err(ks, "failed to read device ID\n");
+ err = -ENODEV;
+ goto err_register;
+ }
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ platform_set_drvdata(pdev, netdev);
+
+ ks_soft_reset(ks, GRR_GSR);
+ ks_hw_init(ks);
+ ks_disable(ks);
+ ks_setup(ks);
+ ks_setup_int(ks);
+ ks_enable_int(ks);
+ ks_enable(ks);
+ memcpy(netdev->dev_addr, ks->mac_addr, 6);
+
+ data = ks_rdreg16(ks, KS_OBCR);
+ ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+
+ /**
+ * If you want to use the default MAC addr,
+ * comment out the 2 functions below.
+ */
+
+ random_ether_addr(netdev->dev_addr);
+ ks_set_mac(ks, netdev->dev_addr);
+
+ id = ks_rdreg16(ks, KS_CIDER);
+
+ printk(KERN_INFO DRV_NAME
+ " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+ (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+ return 0;
+
+err_register:
+err_get_irq:
+ iounmap(ks->hw_addr_cmd);
+err_ioremap1:
+ iounmap(ks->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ release_mem_region(io_c->start, resource_size(io_c));
+err_mem_region1:
+ release_mem_region(io_d->start, resource_size(io_d));
+err_mem_region:
+ return err;
+}
+
+static int __devexit ks8851_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct ks_net *ks = netdev_priv(netdev);
+ struct resource *io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ unregister_netdev(netdev);
+ iounmap(ks->hw_addr_cmd);
+ iounmap(ks->hw_addr);
+ kfree(ks->frame_head_info);
+ free_netdev(netdev);
+ release_mem_region(io_c->start, resource_size(io_c));
+ release_mem_region(io_d->start, resource_size(io_d));
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver ks8851_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8851_probe,
+ .remove = __devexit_p(ks8851_remove),
+};
+
+static int __init ks8851_init(void)
+{
+ return platform_driver_register(&ks8851_platform_driver);
+}
+
+static void __exit ks8851_exit(void)
+{
+ platform_driver_unregister(&ks8851_platform_driver);
+}
+
+module_init(ks8851_init);
+module_exit(ks8851_exit);
+
+MODULE_DESCRIPTION("KS8851 MLL Network driver");
+MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
+MODULE_LICENSE("GPL");
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 92ceb689b4d..2af81735386 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -828,7 +828,7 @@ static int __exit meth_remove(struct platform_device *pdev)
static struct platform_driver meth_driver = {
.probe = meth_probe,
- .remove = __devexit_p(meth_remove),
+ .remove = __exit_p(meth_remove),
.driver = {
.name = "meth",
.owner = THIS_MODULE,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index cee199ceba2..3c16602172f 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -33,6 +33,7 @@
*/
#include <linux/mlx4/cmd.h>
+#include <linux/cache.h>
#include "fw.h"
#include "icm.h"
@@ -698,6 +699,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
+#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
@@ -735,6 +737,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
+ *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
+ (ilog2(cache_line_size()) - 4) << 5;
+
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3dd481e77f9..5dd7225b178 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1282,6 +1282,7 @@ static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
{ 0, }
};
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f7bdde111df..7fc15e9e8ad 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -595,7 +595,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
void __iomem *mem_ptr2 = NULL;
void __iomem *db_ptr = NULL;
- unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
+ resource_size_t mem_base, db_base;
+ unsigned long mem_len, db_len = 0, pci_len0 = 0;
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
@@ -1469,6 +1470,7 @@ netxen_nic_resume(struct pci_dev *pdev)
}
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+ return 0;
err_out_detach:
netxen_nic_detach(adapter);
@@ -1713,7 +1715,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
- if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) {
+ if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
}
@@ -1903,12 +1905,13 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
- goto done;
+ clear_bit(__NX_RESETTING, &adapter->state);
} else {
+ clear_bit(__NX_RESETTING, &adapter->state);
if (!netxen_nic_reset_context(adapter)) {
adapter->netdev->trans_start = jiffies;
- goto done;
+ return;
}
/* context reset failed, fall through for fw reset */
@@ -1916,8 +1919,6 @@ static void netxen_tx_timeout_task(struct work_struct *work)
request_reset:
adapter->need_fw_reset = 1;
-done:
- clear_bit(__NX_RESETTING, &adapter->state);
}
struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index c594e194647..57fd483dbb1 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -111,6 +111,7 @@
#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/ethtool.h>
+#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 064a4fe1dd9..28a86224879 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -71,6 +71,9 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
struct pasemi_mac *mac = netdev_priv(netdev);
struct phy_device *phydev = mac->phydev;
+ if (!phydev)
+ return -EOPNOTSUPP;
+
return phy_ethtool_gset(phydev, cmd);
}
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index ee8ad3e180d..b58965a2b3a 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static void set_rx_mode(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
static void tc574_detach(struct pcmcia_device *p_dev);
@@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_tx_timeout = el3_tx_timeout,
.ndo_get_stats = el3_get_stats,
.ndo_do_ioctl = el3_ioctl,
- .ndo_set_multicast_list = set_rx_mode,
+ .ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev)
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
}
+static void set_multicast_list(struct net_device *dev)
+{
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->window_lock, flags);
+ set_rx_mode(dev);
+ spin_unlock_irqrestore(&lp->window_lock, flags);
+}
+
static int el3_close(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 97db1c73234..bd3447f0490 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -340,12 +340,11 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
base = &virt[hw_info[i].offset & (req.Size-1)];
if ((readb(base+0) == hw_info[i].a0) &&
(readb(base+2) == hw_info[i].a1) &&
- (readb(base+4) == hw_info[i].a2))
- break;
- }
- if (i < NR_INFO) {
- for (j = 0; j < 6; j++)
- dev->dev_addr[j] = readb(base + (j<<1));
+ (readb(base+4) == hw_info[i].a2)) {
+ for (j = 0; j < 6; j++)
+ dev->dev_addr[j] = readb(base + (j<<1));
+ break;
+ }
}
iounmap(virt);
@@ -1755,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"),
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"),
PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"),
- PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
- PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
- PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
+ PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
- PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
- PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b),
PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
0xb4be14e3, 0x43ac239b, 0x0877b627),
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 6d28b18e7e2..c1b3f09f452 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -31,6 +31,7 @@ static const char *const version =
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 250e10f2c35..8659d341e76 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);
static struct of_platform_driver mdio_ofgpio_driver = {
.name = "mdio-gpio",
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index cc394d07375..5910df60c93 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2179,7 +2179,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
* session or the special tunnel type.
*/
static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, int optlen)
+ char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct pppol2tp_session *session = sk->sk_user_data;
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a9845a2f243..e7285f01bd0 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
/*
* General definitions...
@@ -135,9 +136,9 @@ enum {
RST_FO_TFO = (1 << 0),
RST_FO_RR_MASK = 0x00060000,
RST_FO_RR_CQ_CAM = 0x00000000,
- RST_FO_RR_DROP = 0x00000001,
- RST_FO_RR_DQ = 0x00000002,
- RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
+ RST_FO_RR_DROP = 0x00000002,
+ RST_FO_RR_DQ = 0x00000004,
+ RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
RST_FO_FRB = (1 << 12),
RST_FO_MOP = (1 << 13),
RST_FO_REG = (1 << 14),
@@ -802,6 +803,12 @@ enum {
MB_CMD_SET_PORT_CFG = 0x00000122,
MB_CMD_GET_PORT_CFG = 0x00000123,
MB_CMD_GET_LINK_STS = 0x00000124,
+ MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
+ MB_SET_MPI_TFK_STOP = (1 << 0),
+ MB_SET_MPI_TFK_RESUME = (1 << 1),
+ MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
+ MB_GET_MPI_TFK_STOPPED = (1 << 0),
+ MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
/* Mailbox Command Status. */
MB_CMD_STS_GOOD = 0x00004000, /* Success. */
@@ -1167,7 +1174,7 @@ struct ricb {
#define RSS_RI6 0x40
#define RSS_RT6 0x80
__le16 mask;
- __le32 hash_cq_id[256];
+ u8 hash_cq_id[1024];
__le32 ipv6_hash_key[10];
__le32 ipv4_hash_key[4];
} __attribute((packed));
@@ -1381,15 +1388,15 @@ struct intr_context {
/* adapter flags definitions. */
enum {
- QL_ADAPTER_UP = (1 << 0), /* Adapter has been brought up. */
- QL_LEGACY_ENABLED = (1 << 3),
- QL_MSI_ENABLED = (1 << 3),
- QL_MSIX_ENABLED = (1 << 4),
- QL_DMA64 = (1 << 5),
- QL_PROMISCUOUS = (1 << 6),
- QL_ALLMULTI = (1 << 7),
- QL_PORT_CFG = (1 << 8),
- QL_CAM_RT_SET = (1 << 9),
+ QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
+ QL_LEGACY_ENABLED = 1,
+ QL_MSI_ENABLED = 2,
+ QL_MSIX_ENABLED = 3,
+ QL_DMA64 = 4,
+ QL_PROMISCUOUS = 5,
+ QL_ALLMULTI = 6,
+ QL_PORT_CFG = 7,
+ QL_CAM_RT_SET = 8,
};
/* link_status bit definitions */
@@ -1477,7 +1484,6 @@ struct ql_adapter {
u32 mailbox_in;
u32 mailbox_out;
struct mbox_params idc_mbc;
- struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
@@ -1606,6 +1612,8 @@ int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_mb_about_fw(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
+int ql_wait_fifo_empty(struct ql_adapter *qdev);
#if 1
#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 68f9bd280f8..52073946bce 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -45,7 +45,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
if (!netif_running(qdev->ndev))
return status;
- spin_lock(&qdev->hw_lock);
/* Skip the default queue, and update the outbound handler
* queues if they changed.
*/
@@ -92,7 +91,6 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
}
}
exit:
- spin_unlock(&qdev->hw_lock);
return status;
}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 7783c5db81d..48b45df85ec 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -34,7 +34,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -321,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
switch (type) {
case MAC_ADDR_TYPE_MULTI_MAC:
+ {
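+ /* Split the multicast address into a 16-bit upper and a
+ * 32-bit lower half for the two data-register writes.
+ */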
+ u32 upper = (addr[0] << 8) | addr[1];
+ u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+ (addr[4] << 8) | (addr[5]);
+
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) |
+ type | MAC_ADDR_E);
+ ql_write32(qdev, MAC_ADDR_DATA, lower);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+ (index << MAC_ADDR_IDX_SHIFT) |
+ type | MAC_ADDR_E);
+
+ ql_write32(qdev, MAC_ADDR_DATA, upper);
+ status =
+ ql_wait_reg_rdy(qdev,
+ MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+ if (status)
+ goto exit;
+ break;
+ }
case MAC_ADDR_TYPE_CAM_MAC:
{
u32 cam_output;
@@ -366,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
and possibly the function id. Right now we hardcode
the route field to NIC core.
*/
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->
- func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->vlgrp)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
- }
+ cam_output = (CAM_OUT_ROUTE_NIC |
+ (qdev->
+ func << CAM_OUT_FUNC_SHIFT) |
+ (0 << CAM_OUT_CQ_ID_SHIFT));
+ if (qdev->vlgrp)
+ cam_output |= CAM_OUT_RV;
+ /* route to NIC core */
+ ql_write32(qdev, MAC_ADDR_DATA, cam_output);
break;
}
case MAC_ADDR_TYPE_VLAN:
@@ -547,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
}
case RT_IDX_MCAST: /* Pass up All Multicast frames. */
{
- value = RT_IDX_DST_CAM_Q | /* dest */
+ value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
}
case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
{
- value = RT_IDX_DST_CAM_Q | /* dest */
+ value = RT_IDX_DST_DFLT_Q | /* dest */
RT_IDX_TYPE_NICQ | /* type */
(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
break;
@@ -1926,12 +1954,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return;
- spin_lock(&qdev->hw_lock);
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
}
- spin_unlock(&qdev->hw_lock);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -1945,12 +1971,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
if (status)
return;
- spin_lock(&qdev->hw_lock);
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
}
- spin_unlock(&qdev->hw_lock);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -2001,15 +2025,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
/*
* Check MPI processor activity.
*/
- if (var & STS_PI) {
+ if ((var & STS_PI) &&
+ (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
/*
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
- queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
- &qdev->mpi_work, 0);
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+ queue_delayed_work_on(smp_processor_id(),
+ qdev->workqueue, &qdev->mpi_work, 0);
work_done++;
}
@@ -3080,6 +3106,12 @@ err_irq:
static int ql_start_rss(struct ql_adapter *qdev)
{
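+ /* Fixed RSS hash key, used instead of a randomly generated one. */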
+ u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
+ 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
+ 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
+ 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
+ 0xbe, 0xac, 0x01, 0xfa};
struct ricb *ricb = &qdev->ricb;
int status = 0;
int i;
@@ -3089,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev)
ricb->base_cq = RSS_L4K;
ricb->flags =
- (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
- RSS_RT6);
- ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
+ (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
+ ricb->mask = cpu_to_le16((u16)(0x3ff));
/*
* Fill out the Indirection Table.
*/
- for (i = 0; i < 256; i++)
- hash_id[i] = i & (qdev->rss_ring_count - 1);
+ for (i = 0; i < 1024; i++)
+ hash_id[i] = (i & (qdev->rss_ring_count - 1));
- /*
- * Random values for the IPv6 and IPv4 Hash Keys.
- */
- get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
- get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
+ memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
+ memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
@@ -3142,14 +3170,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
{
int status = 0;
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ /* Clear all the entries in the routing table. */
+ status = ql_clear_routing_entries(qdev);
if (status)
return status;
- /* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
- goto exit;
+ return status;
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
@@ -3242,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
+ /* Set RX packet routing to use port/pci function on which the
+ * packet arrived on in addition to usual frame routing.
+ * This is helpful on bonding where both interfaces can have
+ * the same MAC address.
+ */
+ ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+
/* Start up the rx queues. */
for (i = 0; i < qdev->rx_ring_count; i++) {
status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
@@ -3314,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
end_jiffies = jiffies +
max((unsigned long)1, usecs_to_jiffies(30));
+
+ /* Stop management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+ /* Wait for the NIC and MGMNT FIFOs to empty. */
+ ql_wait_fifo_empty(qdev);
+
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
do {
@@ -3329,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
status = -ETIMEDOUT;
}
+ /* Resume management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
return status;
}
@@ -3380,12 +3424,10 @@ static int ql_adapter_down(struct ql_adapter *qdev)
ql_free_rx_buffers(qdev);
- spin_lock(&qdev->hw_lock);
status = ql_adapter_reset(qdev);
if (status)
QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
qdev->func);
- spin_unlock(&qdev->hw_lock);
return status;
}
@@ -3587,7 +3629,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
if (status)
return;
- spin_lock(&qdev->hw_lock);
/*
* Set or clear promiscuous mode if a
* transition is taking place.
@@ -3664,7 +3705,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
}
}
exit:
- spin_unlock(&qdev->hw_lock);
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
@@ -3684,10 +3724,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- spin_lock(&qdev->hw_lock);
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- spin_unlock(&qdev->hw_lock);
if (status)
QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3705,7 +3743,7 @@ static void ql_asic_reset_work(struct work_struct *work)
struct ql_adapter *qdev =
container_of(work, struct ql_adapter, asic_reset_work.work);
int status;
-
+ rtnl_lock();
status = ql_adapter_down(qdev);
if (status)
goto error;
@@ -3714,11 +3752,17 @@ static void ql_asic_reset_work(struct work_struct *work)
if (status)
goto error;
+ /* Restore rx mode. */
+ clear_bit(QL_ALLMULTI, &qdev->flags);
+ clear_bit(QL_PROMISCUOUS, &qdev->flags);
+ qlge_set_multicast_list(qdev->ndev);
+
+ rtnl_unlock();
return;
error:
QPRINTK(qdev, IFUP, ALERT,
"Driver up/down cycle failed, closing device\n");
- rtnl_lock();
+
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
rtnl_unlock();
@@ -3834,11 +3878,14 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
return err;
}
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ pci_set_drvdata(pdev, ndev);
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pos <= 0) {
dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
"aborting.\n");
- goto err_out;
+ return pos;
} else {
pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
@@ -3851,7 +3898,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "PCI region request failed.\n");
- goto err_out;
+ return err;
}
pci_set_master(pdev);
@@ -3869,7 +3916,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
goto err_out;
}
- pci_set_drvdata(pdev, ndev);
qdev->reg_base =
ioremap_nocache(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
@@ -3889,8 +3935,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
goto err_out;
}
- qdev->ndev = ndev;
- qdev->pdev = pdev;
err = ql_get_board_info(qdev);
if (err) {
dev_err(&pdev->dev, "Register access failed.\n");
@@ -3930,7 +3974,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
- mutex_init(&qdev->mpi_mutex);
init_completion(&qdev->ide_completion);
if (!cards_found) {
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 6685bd97da9..99e58e3f8e2 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -472,7 +472,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int status, count;
- mutex_lock(&qdev->mpi_mutex);
/* Begin polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
@@ -541,7 +540,6 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = -EIO;
}
end:
- mutex_unlock(&qdev->mpi_mutex);
/* End polled mode for MPI */
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
return status;
@@ -770,13 +768,104 @@ static int ql_idc_wait(struct ql_adapter *qdev)
return status;
}
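+/* Ask the firmware to stop or resume management (MPI) traffic; used around adapter resets. */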
+int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 2;
+
+ mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
+ mbcp->mbox_in[1] = control;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Command not supported by firmware.\n");
+ status = -EINVAL;
+ } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+ /* This indicates that the firmware is
+ * already in the state we are trying to
+ * change it to.
+ */
+ QPRINTK(qdev, DRV, ERR,
+ "Command parameters make no change.\n");
+ }
+ return status;
+}
+
+/* Returns a negative error code or the mailbox command status. */
+static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+ *control = 0;
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
+ *control = mbcp->mbox_in[1];
+ return status;
+ }
+
+ if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
+ QPRINTK(qdev, DRV, ERR,
+ "Command not supported by firmware.\n");
+ status = -EINVAL;
+ } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
+ QPRINTK(qdev, DRV, ERR,
+ "Failed to get MPI traffic control.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
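+/* Poll, in 100 ms steps, until both the NIC and management FIFOs report empty, or time out. */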
+int ql_wait_fifo_empty(struct ql_adapter *qdev)
+{
+ int count = 5;
+ u32 mgmnt_fifo_empty;
+ u32 nic_fifo_empty;
+
+ do {
+ nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
+ ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
+ mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
+ if (nic_fifo_empty && mgmnt_fifo_empty)
+ return 0;
+ msleep(100);
+ } while (count-- > 0);
+ return -ETIMEDOUT;
+}
+
/* API called in work thread context to set new TX/RX
* maximum frame size values to match MTU.
*/
static int ql_set_port_cfg(struct ql_adapter *qdev)
{
int status;
+ rtnl_lock();
status = ql_mb_set_port_cfg(qdev);
+ rtnl_unlock();
if (status)
return status;
status = ql_idc_wait(qdev);
@@ -797,7 +886,9 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
int status;
+ rtnl_lock();
status = ql_mb_get_port_cfg(qdev);
+ rtnl_unlock();
if (status) {
QPRINTK(qdev, DRV, ERR,
"Bug: Failed to get port config data.\n");
@@ -855,7 +946,9 @@ void ql_mpi_idc_work(struct work_struct *work)
* needs to be set.
* */
set_bit(QL_CAM_RT_SET, &qdev->flags);
+ rtnl_lock();
status = ql_mb_idc_ack(qdev);
+ rtnl_unlock();
if (status) {
QPRINTK(qdev, DRV, ERR,
"Bug: No pending IDC!\n");
@@ -871,7 +964,9 @@ void ql_mpi_work(struct work_struct *work)
struct mbox_params *mbcp = &mbc;
int err = 0;
- mutex_lock(&qdev->mpi_mutex);
+ rtnl_lock();
+ /* Begin polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
while (ql_read32(qdev, STS) & STS_PI) {
memset(mbcp, 0, sizeof(struct mbox_params));
@@ -884,7 +979,9 @@ void ql_mpi_work(struct work_struct *work)
break;
}
- mutex_unlock(&qdev->mpi_mutex);
+ /* End polled mode for MPI */
+ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
+ rtnl_unlock();
ql_enable_completion_interrupt(qdev, 0);
}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 50c6a3cfe43..83c47d95c3a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -115,7 +115,9 @@ enum mac_version {
RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
- RTL_GIGA_MAC_VER_25 = 0x19 // 8168D
+ RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
+ RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
+ RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP
};
#define _R(NAME,MAC,MASK) \
@@ -150,7 +152,9 @@ static const struct {
_R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
_R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
_R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
- _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880) // PCI-E
+ _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
+ _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
+ _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E
};
#undef _R
@@ -253,6 +257,13 @@ enum rtl8168_8101_registers {
DBG_REG = 0xd1,
#define FIX_NAK_1 (1 << 4)
#define FIX_NAK_2 (1 << 3)
+ EFUSEAR = 0xdc,
+#define EFUSEAR_FLAG 0x80000000
+#define EFUSEAR_WRITE_CMD 0x80000000
+#define EFUSEAR_READ_CMD 0x00000000
+#define EFUSEAR_REG_MASK 0x03ff
+#define EFUSEAR_REG_SHIFT 8
+#define EFUSEAR_DATA_MASK 0xff
};
enum rtl_register_content {
@@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
}
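+/* Read a PHY register, set the bits in 'p', clear the bits in 'm' and write it back. */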
+static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m)
+{
+ int val;
+
+ val = mdio_read(ioaddr, reg_addr);
+ mdio_write(ioaddr, reg_addr, (val | p) & ~m);
+}
+
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
int val)
{
@@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
return value;
}
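+/* Read one byte from the 8168d eFuse, polling the completion flag for up to ~30 ms. */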
+static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+{
+ u8 value = 0xff;
+ unsigned int i;
+
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ for (i = 0; i < 300; i++) {
+ if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
+ value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
+ break;
+ }
+ udelay(100);
+ }
+
+ return value;
+}
+
static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
{
RTL_W16(IntrMask, 0x0000);
@@ -1243,7 +1280,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
int mac_version;
} mac_info[] = {
/* 8168D family. */
- { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_25 },
+ { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
+ { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
+ { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 },
+ { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
/* 8168C family. */
{ 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 },
@@ -1648,74 +1688,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
rtl8168c_3_hw_phy_config(ioaddr);
}
-static void rtl8168d_hw_phy_config(void __iomem *ioaddr)
+static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
{
- struct phy_reg phy_reg_init_0[] = {
+ static struct phy_reg phy_reg_init_0[] = {
{ 0x1f, 0x0001 },
- { 0x09, 0x2770 },
- { 0x08, 0x04d0 },
- { 0x0b, 0xad15 },
- { 0x0c, 0x5bf0 },
- { 0x1c, 0xf101 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
{ 0x1f, 0x0003 },
- { 0x14, 0x94d7 },
- { 0x12, 0xf4d6 },
- { 0x09, 0xca0f },
- { 0x1f, 0x0002 },
- { 0x0b, 0x0b10 },
- { 0x0c, 0xd1f7 },
- { 0x1f, 0x0002 },
- { 0x06, 0x5461 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 }
+ };
+ static struct phy_reg phy_reg_init_1[] = {
{ 0x1f, 0x0002 },
- { 0x05, 0x6662 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 }
+ };
+ static struct phy_reg phy_reg_init_2[] = {
+ { 0x1f, 0x0005 },
+ { 0x05, 0xffc2 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8000 },
+ { 0x06, 0xf8f9 },
+ { 0x06, 0xfaef },
+ { 0x06, 0x59ee },
+ { 0x06, 0xf8ea },
+ { 0x06, 0x00ee },
+ { 0x06, 0xf8eb },
+ { 0x06, 0x00e0 },
+ { 0x06, 0xf87c },
+ { 0x06, 0xe1f8 },
+ { 0x06, 0x7d59 },
+ { 0x06, 0x0fef },
+ { 0x06, 0x0139 },
+ { 0x06, 0x029e },
+ { 0x06, 0x06ef },
+ { 0x06, 0x1039 },
+ { 0x06, 0x089f },
+ { 0x06, 0x2aee },
+ { 0x06, 0xf8ea },
+ { 0x06, 0x00ee },
+ { 0x06, 0xf8eb },
+ { 0x06, 0x01e0 },
+ { 0x06, 0xf87c },
+ { 0x06, 0xe1f8 },
+ { 0x06, 0x7d58 },
+ { 0x06, 0x409e },
+ { 0x06, 0x0f39 },
+ { 0x06, 0x46aa },
+ { 0x06, 0x0bbf },
+ { 0x06, 0x8290 },
+ { 0x06, 0xd682 },
+ { 0x06, 0x9802 },
+ { 0x06, 0x014f },
+ { 0x06, 0xae09 },
+ { 0x06, 0xbf82 },
+ { 0x06, 0x98d6 },
+ { 0x06, 0x82a0 },
+ { 0x06, 0x0201 },
+ { 0x06, 0x4fef },
+ { 0x06, 0x95fe },
+ { 0x06, 0xfdfc },
+ { 0x06, 0x05f8 },
+ { 0x06, 0xf9fa },
+ { 0x06, 0xeef8 },
+ { 0x06, 0xea00 },
+ { 0x06, 0xeef8 },
+ { 0x06, 0xeb00 },
+ { 0x06, 0xe2f8 },
+ { 0x06, 0x7ce3 },
+ { 0x06, 0xf87d },
+ { 0x06, 0xa511 },
+ { 0x06, 0x1112 },
+ { 0x06, 0xd240 },
+ { 0x06, 0xd644 },
+ { 0x06, 0x4402 },
+ { 0x06, 0x8217 },
+ { 0x06, 0xd2a0 },
+ { 0x06, 0xd6aa },
+ { 0x06, 0xaa02 },
+ { 0x06, 0x8217 },
+ { 0x06, 0xae0f },
+ { 0x06, 0xa544 },
+ { 0x06, 0x4402 },
+ { 0x06, 0xae4d },
+ { 0x06, 0xa5aa },
+ { 0x06, 0xaa02 },
+ { 0x06, 0xae47 },
+ { 0x06, 0xaf82 },
+ { 0x06, 0x13ee },
+ { 0x06, 0x834e },
+ { 0x06, 0x00ee },
+ { 0x06, 0x834d },
+ { 0x06, 0x0fee },
+ { 0x06, 0x834c },
+ { 0x06, 0x0fee },
+ { 0x06, 0x834f },
+ { 0x06, 0x00ee },
+ { 0x06, 0x8351 },
+ { 0x06, 0x00ee },
+ { 0x06, 0x834a },
+ { 0x06, 0xffee },
+ { 0x06, 0x834b },
+ { 0x06, 0xffe0 },
+ { 0x06, 0x8330 },
+ { 0x06, 0xe183 },
+ { 0x06, 0x3158 },
+ { 0x06, 0xfee4 },
+ { 0x06, 0xf88a },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x8be0 },
+ { 0x06, 0x8332 },
+ { 0x06, 0xe183 },
+ { 0x06, 0x3359 },
+ { 0x06, 0x0fe2 },
+ { 0x06, 0x834d },
+ { 0x06, 0x0c24 },
+ { 0x06, 0x5af0 },
+ { 0x06, 0x1e12 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x8ce5 },
+ { 0x06, 0xf88d },
+ { 0x06, 0xaf82 },
+ { 0x06, 0x13e0 },
+ { 0x06, 0x834f },
+ { 0x06, 0x10e4 },
+ { 0x06, 0x834f },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x009f },
+ { 0x06, 0x0ae0 },
+ { 0x06, 0x834f },
+ { 0x06, 0xa010 },
+ { 0x06, 0xa5ee },
+ { 0x06, 0x834e },
+ { 0x06, 0x01e0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7805 },
+ { 0x06, 0x9e9a },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x049e },
+ { 0x06, 0x10e0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7803 },
+ { 0x06, 0x9e0f },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x019e },
+ { 0x06, 0x05ae },
+ { 0x06, 0x0caf },
+ { 0x06, 0x81f8 },
+ { 0x06, 0xaf81 },
+ { 0x06, 0xa3af },
+ { 0x06, 0x81dc },
+ { 0x06, 0xaf82 },
+ { 0x06, 0x13ee },
+ { 0x06, 0x8348 },
+ { 0x06, 0x00ee },
+ { 0x06, 0x8349 },
+ { 0x06, 0x00e0 },
+ { 0x06, 0x8351 },
+ { 0x06, 0x10e4 },
+ { 0x06, 0x8351 },
+ { 0x06, 0x5801 },
+ { 0x06, 0x9fea },
+ { 0x06, 0xd000 },
+ { 0x06, 0xd180 },
+ { 0x06, 0x1f66 },
+ { 0x06, 0xe2f8 },
+ { 0x06, 0xeae3 },
+ { 0x06, 0xf8eb },
+ { 0x06, 0x5af8 },
+ { 0x06, 0x1e20 },
+ { 0x06, 0xe6f8 },
+ { 0x06, 0xeae5 },
+ { 0x06, 0xf8eb },
+ { 0x06, 0xd302 },
+ { 0x06, 0xb3fe },
+ { 0x06, 0xe2f8 },
+ { 0x06, 0x7cef },
+ { 0x06, 0x325b },
+ { 0x06, 0x80e3 },
+ { 0x06, 0xf87d },
+ { 0x06, 0x9e03 },
+ { 0x06, 0x7dff },
+ { 0x06, 0xff0d },
+ { 0x06, 0x581c },
+ { 0x06, 0x551a },
+ { 0x06, 0x6511 },
+ { 0x06, 0xa190 },
+ { 0x06, 0xd3e2 },
+ { 0x06, 0x8348 },
+ { 0x06, 0xe383 },
+ { 0x06, 0x491b },
+ { 0x06, 0x56ab },
+ { 0x06, 0x08ef },
+ { 0x06, 0x56e6 },
+ { 0x06, 0x8348 },
+ { 0x06, 0xe783 },
+ { 0x06, 0x4910 },
+ { 0x06, 0xd180 },
+ { 0x06, 0x1f66 },
+ { 0x06, 0xa004 },
+ { 0x06, 0xb9e2 },
+ { 0x06, 0x8348 },
+ { 0x06, 0xe383 },
+ { 0x06, 0x49ef },
+ { 0x06, 0x65e2 },
+ { 0x06, 0x834a },
+ { 0x06, 0xe383 },
+ { 0x06, 0x4b1b },
+ { 0x06, 0x56aa },
+ { 0x06, 0x0eef },
+ { 0x06, 0x56e6 },
+ { 0x06, 0x834a },
+ { 0x06, 0xe783 },
+ { 0x06, 0x4be2 },
+ { 0x06, 0x834d },
+ { 0x06, 0xe683 },
+ { 0x06, 0x4ce0 },
+ { 0x06, 0x834d },
+ { 0x06, 0xa000 },
+ { 0x06, 0x0caf },
+ { 0x06, 0x81dc },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4d10 },
+ { 0x06, 0xe483 },
+ { 0x06, 0x4dae },
+ { 0x06, 0x0480 },
+ { 0x06, 0xe483 },
+ { 0x06, 0x4de0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7803 },
+ { 0x06, 0x9e0b },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x049e },
+ { 0x06, 0x04ee },
+ { 0x06, 0x834e },
+ { 0x06, 0x02e0 },
+ { 0x06, 0x8332 },
+ { 0x06, 0xe183 },
+ { 0x06, 0x3359 },
+ { 0x06, 0x0fe2 },
+ { 0x06, 0x834d },
+ { 0x06, 0x0c24 },
+ { 0x06, 0x5af0 },
+ { 0x06, 0x1e12 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x8ce5 },
+ { 0x06, 0xf88d },
+ { 0x06, 0xe083 },
+ { 0x06, 0x30e1 },
+ { 0x06, 0x8331 },
+ { 0x06, 0x6801 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x8ae5 },
+ { 0x06, 0xf88b },
+ { 0x06, 0xae37 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4e03 },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4ce1 },
+ { 0x06, 0x834d },
+ { 0x06, 0x1b01 },
+ { 0x06, 0x9e04 },
+ { 0x06, 0xaaa1 },
+ { 0x06, 0xaea8 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4e04 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4f00 },
+ { 0x06, 0xaeab },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4f78 },
+ { 0x06, 0x039f },
+ { 0x06, 0x14ee },
+ { 0x06, 0x834e },
+ { 0x06, 0x05d2 },
+ { 0x06, 0x40d6 },
+ { 0x06, 0x5554 },
+ { 0x06, 0x0282 },
+ { 0x06, 0x17d2 },
+ { 0x06, 0xa0d6 },
+ { 0x06, 0xba00 },
+ { 0x06, 0x0282 },
+ { 0x06, 0x17fe },
+ { 0x06, 0xfdfc },
+ { 0x06, 0x05f8 },
+ { 0x06, 0xe0f8 },
+ { 0x06, 0x60e1 },
+ { 0x06, 0xf861 },
+ { 0x06, 0x6802 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x60e5 },
+ { 0x06, 0xf861 },
+ { 0x06, 0xe0f8 },
+ { 0x06, 0x48e1 },
+ { 0x06, 0xf849 },
+ { 0x06, 0x580f },
+ { 0x06, 0x1e02 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x48e5 },
+ { 0x06, 0xf849 },
+ { 0x06, 0xd000 },
+ { 0x06, 0x0282 },
+ { 0x06, 0x5bbf },
+ { 0x06, 0x8350 },
+ { 0x06, 0xef46 },
+ { 0x06, 0xdc19 },
+ { 0x06, 0xddd0 },
+ { 0x06, 0x0102 },
+ { 0x06, 0x825b },
+ { 0x06, 0x0282 },
+ { 0x06, 0x77e0 },
+ { 0x06, 0xf860 },
+ { 0x06, 0xe1f8 },
+ { 0x06, 0x6158 },
+ { 0x06, 0xfde4 },
+ { 0x06, 0xf860 },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x61fc },
+ { 0x06, 0x04f9 },
+ { 0x06, 0xfafb },
+ { 0x06, 0xc6bf },
+ { 0x06, 0xf840 },
+ { 0x06, 0xbe83 },
+ { 0x06, 0x50a0 },
+ { 0x06, 0x0101 },
+ { 0x06, 0x071b },
+ { 0x06, 0x89cf },
+ { 0x06, 0xd208 },
+ { 0x06, 0xebdb },
+ { 0x06, 0x19b2 },
+ { 0x06, 0xfbff },
+ { 0x06, 0xfefd },
+ { 0x06, 0x04f8 },
+ { 0x06, 0xe0f8 },
+ { 0x06, 0x48e1 },
+ { 0x06, 0xf849 },
+ { 0x06, 0x6808 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x48e5 },
+ { 0x06, 0xf849 },
+ { 0x06, 0x58f7 },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x48e5 },
+ { 0x06, 0xf849 },
+ { 0x06, 0xfc04 },
+ { 0x06, 0x4d20 },
+ { 0x06, 0x0002 },
+ { 0x06, 0x4e22 },
+ { 0x06, 0x0002 },
+ { 0x06, 0x4ddf },
+ { 0x06, 0xff01 },
+ { 0x06, 0x4edd },
+ { 0x06, 0xff01 },
+ { 0x05, 0x83d4 },
+ { 0x06, 0x8000 },
+ { 0x05, 0x83d8 },
+ { 0x06, 0x8051 },
+ { 0x02, 0x6010 },
+ { 0x03, 0xdc00 },
+ { 0x05, 0xfff6 },
+ { 0x06, 0x00fc },
{ 0x1f, 0x0000 },
- { 0x14, 0x0060 },
+
{ 0x1f, 0x0000 },
- { 0x0d, 0xf8a0 },
+ { 0x0d, 0xf880 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+
+ mdio_write(ioaddr, 0x1f, 0x0002);
+ mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
+ mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);
+
+ rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
+
+ if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+ { 0x1f, 0x0002 }
+ };
+ int val;
+
+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ val = mdio_read(ioaddr, 0x0d);
+
+ if ((val & 0x00ff) != 0x006c) {
+ u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ mdio_write(ioaddr, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ mdio_write(ioaddr, 0x0d, val | set[i]);
+ }
+ } else {
+ struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x6662 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x6662 }
+ };
+
+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ }
+
+ mdio_write(ioaddr, 0x1f, 0x0002);
+ mdio_patch(ioaddr, 0x0d, 0x0300);
+ mdio_patch(ioaddr, 0x0f, 0x0010);
+
+ mdio_write(ioaddr, 0x1f, 0x0002);
+ mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
+ mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
+
+ rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2));
+}
+
+static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
+{
+ static struct phy_reg phy_reg_init_0[] = {
+ { 0x1f, 0x0001 },
+ { 0x06, 0x4064 },
+ { 0x07, 0x2863 },
+ { 0x08, 0x059c },
+ { 0x09, 0x26b4 },
+ { 0x0a, 0x6a19 },
+ { 0x0b, 0xdcc8 },
+ { 0x10, 0xf06d },
+ { 0x14, 0x7f68 },
+ { 0x18, 0x7fd9 },
+ { 0x1c, 0xf0ff },
+ { 0x1d, 0x3d9c },
+ { 0x1f, 0x0003 },
+ { 0x12, 0xf49f },
+ { 0x13, 0x070b },
+ { 0x1a, 0x05ad },
+ { 0x14, 0x94c0 },
+
+ { 0x1f, 0x0002 },
+ { 0x06, 0x5561 },
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8332 },
+ { 0x06, 0x5561 }
+ };
+ static struct phy_reg phy_reg_init_1[] = {
+ { 0x1f, 0x0005 },
+ { 0x05, 0xffc2 },
{ 0x1f, 0x0005 },
- { 0x05, 0xffc2 }
+ { 0x05, 0x8000 },
+ { 0x06, 0xf8f9 },
+ { 0x06, 0xfaee },
+ { 0x06, 0xf8ea },
+ { 0x06, 0x00ee },
+ { 0x06, 0xf8eb },
+ { 0x06, 0x00e2 },
+ { 0x06, 0xf87c },
+ { 0x06, 0xe3f8 },
+ { 0x06, 0x7da5 },
+ { 0x06, 0x1111 },
+ { 0x06, 0x12d2 },
+ { 0x06, 0x40d6 },
+ { 0x06, 0x4444 },
+ { 0x06, 0x0281 },
+ { 0x06, 0xc6d2 },
+ { 0x06, 0xa0d6 },
+ { 0x06, 0xaaaa },
+ { 0x06, 0x0281 },
+ { 0x06, 0xc6ae },
+ { 0x06, 0x0fa5 },
+ { 0x06, 0x4444 },
+ { 0x06, 0x02ae },
+ { 0x06, 0x4da5 },
+ { 0x06, 0xaaaa },
+ { 0x06, 0x02ae },
+ { 0x06, 0x47af },
+ { 0x06, 0x81c2 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4e00 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4d0f },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4c0f },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4f00 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x5100 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4aff },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4bff },
+ { 0x06, 0xe083 },
+ { 0x06, 0x30e1 },
+ { 0x06, 0x8331 },
+ { 0x06, 0x58fe },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x8ae5 },
+ { 0x06, 0xf88b },
+ { 0x06, 0xe083 },
+ { 0x06, 0x32e1 },
+ { 0x06, 0x8333 },
+ { 0x06, 0x590f },
+ { 0x06, 0xe283 },
+ { 0x06, 0x4d0c },
+ { 0x06, 0x245a },
+ { 0x06, 0xf01e },
+ { 0x06, 0x12e4 },
+ { 0x06, 0xf88c },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x8daf },
+ { 0x06, 0x81c2 },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4f10 },
+ { 0x06, 0xe483 },
+ { 0x06, 0x4fe0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7800 },
+ { 0x06, 0x9f0a },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4fa0 },
+ { 0x06, 0x10a5 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4e01 },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x059e },
+ { 0x06, 0x9ae0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7804 },
+ { 0x06, 0x9e10 },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x039e },
+ { 0x06, 0x0fe0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7801 },
+ { 0x06, 0x9e05 },
+ { 0x06, 0xae0c },
+ { 0x06, 0xaf81 },
+ { 0x06, 0xa7af },
+ { 0x06, 0x8152 },
+ { 0x06, 0xaf81 },
+ { 0x06, 0x8baf },
+ { 0x06, 0x81c2 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4800 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4900 },
+ { 0x06, 0xe083 },
+ { 0x06, 0x5110 },
+ { 0x06, 0xe483 },
+ { 0x06, 0x5158 },
+ { 0x06, 0x019f },
+ { 0x06, 0xead0 },
+ { 0x06, 0x00d1 },
+ { 0x06, 0x801f },
+ { 0x06, 0x66e2 },
+ { 0x06, 0xf8ea },
+ { 0x06, 0xe3f8 },
+ { 0x06, 0xeb5a },
+ { 0x06, 0xf81e },
+ { 0x06, 0x20e6 },
+ { 0x06, 0xf8ea },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0xebd3 },
+ { 0x06, 0x02b3 },
+ { 0x06, 0xfee2 },
+ { 0x06, 0xf87c },
+ { 0x06, 0xef32 },
+ { 0x06, 0x5b80 },
+ { 0x06, 0xe3f8 },
+ { 0x06, 0x7d9e },
+ { 0x06, 0x037d },
+ { 0x06, 0xffff },
+ { 0x06, 0x0d58 },
+ { 0x06, 0x1c55 },
+ { 0x06, 0x1a65 },
+ { 0x06, 0x11a1 },
+ { 0x06, 0x90d3 },
+ { 0x06, 0xe283 },
+ { 0x06, 0x48e3 },
+ { 0x06, 0x8349 },
+ { 0x06, 0x1b56 },
+ { 0x06, 0xab08 },
+ { 0x06, 0xef56 },
+ { 0x06, 0xe683 },
+ { 0x06, 0x48e7 },
+ { 0x06, 0x8349 },
+ { 0x06, 0x10d1 },
+ { 0x06, 0x801f },
+ { 0x06, 0x66a0 },
+ { 0x06, 0x04b9 },
+ { 0x06, 0xe283 },
+ { 0x06, 0x48e3 },
+ { 0x06, 0x8349 },
+ { 0x06, 0xef65 },
+ { 0x06, 0xe283 },
+ { 0x06, 0x4ae3 },
+ { 0x06, 0x834b },
+ { 0x06, 0x1b56 },
+ { 0x06, 0xaa0e },
+ { 0x06, 0xef56 },
+ { 0x06, 0xe683 },
+ { 0x06, 0x4ae7 },
+ { 0x06, 0x834b },
+ { 0x06, 0xe283 },
+ { 0x06, 0x4de6 },
+ { 0x06, 0x834c },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4da0 },
+ { 0x06, 0x000c },
+ { 0x06, 0xaf81 },
+ { 0x06, 0x8be0 },
+ { 0x06, 0x834d },
+ { 0x06, 0x10e4 },
+ { 0x06, 0x834d },
+ { 0x06, 0xae04 },
+ { 0x06, 0x80e4 },
+ { 0x06, 0x834d },
+ { 0x06, 0xe083 },
+ { 0x06, 0x4e78 },
+ { 0x06, 0x039e },
+ { 0x06, 0x0be0 },
+ { 0x06, 0x834e },
+ { 0x06, 0x7804 },
+ { 0x06, 0x9e04 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4e02 },
+ { 0x06, 0xe083 },
+ { 0x06, 0x32e1 },
+ { 0x06, 0x8333 },
+ { 0x06, 0x590f },
+ { 0x06, 0xe283 },
+ { 0x06, 0x4d0c },
+ { 0x06, 0x245a },
+ { 0x06, 0xf01e },
+ { 0x06, 0x12e4 },
+ { 0x06, 0xf88c },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x8de0 },
+ { 0x06, 0x8330 },
+ { 0x06, 0xe183 },
+ { 0x06, 0x3168 },
+ { 0x06, 0x01e4 },
+ { 0x06, 0xf88a },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x8bae },
+ { 0x06, 0x37ee },
+ { 0x06, 0x834e },
+ { 0x06, 0x03e0 },
+ { 0x06, 0x834c },
+ { 0x06, 0xe183 },
+ { 0x06, 0x4d1b },
+ { 0x06, 0x019e },
+ { 0x06, 0x04aa },
+ { 0x06, 0xa1ae },
+ { 0x06, 0xa8ee },
+ { 0x06, 0x834e },
+ { 0x06, 0x04ee },
+ { 0x06, 0x834f },
+ { 0x06, 0x00ae },
+ { 0x06, 0xabe0 },
+ { 0x06, 0x834f },
+ { 0x06, 0x7803 },
+ { 0x06, 0x9f14 },
+ { 0x06, 0xee83 },
+ { 0x06, 0x4e05 },
+ { 0x06, 0xd240 },
+ { 0x06, 0xd655 },
+ { 0x06, 0x5402 },
+ { 0x06, 0x81c6 },
+ { 0x06, 0xd2a0 },
+ { 0x06, 0xd6ba },
+ { 0x06, 0x0002 },
+ { 0x06, 0x81c6 },
+ { 0x06, 0xfefd },
+ { 0x06, 0xfc05 },
+ { 0x06, 0xf8e0 },
+ { 0x06, 0xf860 },
+ { 0x06, 0xe1f8 },
+ { 0x06, 0x6168 },
+ { 0x06, 0x02e4 },
+ { 0x06, 0xf860 },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x61e0 },
+ { 0x06, 0xf848 },
+ { 0x06, 0xe1f8 },
+ { 0x06, 0x4958 },
+ { 0x06, 0x0f1e },
+ { 0x06, 0x02e4 },
+ { 0x06, 0xf848 },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x49d0 },
+ { 0x06, 0x0002 },
+ { 0x06, 0x820a },
+ { 0x06, 0xbf83 },
+ { 0x06, 0x50ef },
+ { 0x06, 0x46dc },
+ { 0x06, 0x19dd },
+ { 0x06, 0xd001 },
+ { 0x06, 0x0282 },
+ { 0x06, 0x0a02 },
+ { 0x06, 0x8226 },
+ { 0x06, 0xe0f8 },
+ { 0x06, 0x60e1 },
+ { 0x06, 0xf861 },
+ { 0x06, 0x58fd },
+ { 0x06, 0xe4f8 },
+ { 0x06, 0x60e5 },
+ { 0x06, 0xf861 },
+ { 0x06, 0xfc04 },
+ { 0x06, 0xf9fa },
+ { 0x06, 0xfbc6 },
+ { 0x06, 0xbff8 },
+ { 0x06, 0x40be },
+ { 0x06, 0x8350 },
+ { 0x06, 0xa001 },
+ { 0x06, 0x0107 },
+ { 0x06, 0x1b89 },
+ { 0x06, 0xcfd2 },
+ { 0x06, 0x08eb },
+ { 0x06, 0xdb19 },
+ { 0x06, 0xb2fb },
+ { 0x06, 0xfffe },
+ { 0x06, 0xfd04 },
+ { 0x06, 0xf8e0 },
+ { 0x06, 0xf848 },
+ { 0x06, 0xe1f8 },
+ { 0x06, 0x4968 },
+ { 0x06, 0x08e4 },
+ { 0x06, 0xf848 },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x4958 },
+ { 0x06, 0xf7e4 },
+ { 0x06, 0xf848 },
+ { 0x06, 0xe5f8 },
+ { 0x06, 0x49fc },
+ { 0x06, 0x044d },
+ { 0x06, 0x2000 },
+ { 0x06, 0x024e },
+ { 0x06, 0x2200 },
+ { 0x06, 0x024d },
+ { 0x06, 0xdfff },
+ { 0x06, 0x014e },
+ { 0x06, 0xddff },
+ { 0x06, 0x0100 },
+ { 0x05, 0x83d8 },
+ { 0x06, 0x8000 },
+ { 0x03, 0xdc00 },
+ { 0x05, 0xfff6 },
+ { 0x06, 0x00fc },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+ { 0x1f, 0x0000 }
};
rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
- if (mdio_read(ioaddr, 0x06) == 0xc400) {
- struct phy_reg phy_reg_init_1[] = {
+ if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x669a },
{ 0x1f, 0x0005 },
- { 0x01, 0x0300 },
- { 0x1f, 0x0000 },
- { 0x11, 0x401c },
- { 0x16, 0x4100 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x669a },
+
+ { 0x1f, 0x0002 }
+ };
+ int val;
+
+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ val = mdio_read(ioaddr, 0x0d);
+ if ((val & 0x00ff) != 0x006c) {
+ u32 set[] = {
+ 0x0065, 0x0066, 0x0067, 0x0068,
+ 0x0069, 0x006a, 0x006b, 0x006c
+ };
+ int i;
+
+ mdio_write(ioaddr, 0x1f, 0x0002);
+
+ val &= 0xff00;
+ for (i = 0; i < ARRAY_SIZE(set); i++)
+ mdio_write(ioaddr, 0x0d, val | set[i]);
+ }
+ } else {
+ struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x05, 0x2642 },
{ 0x1f, 0x0005 },
- { 0x07, 0x0010 },
- { 0x05, 0x83dc },
- { 0x06, 0x087d },
- { 0x05, 0x8300 },
- { 0x06, 0x0101 },
- { 0x06, 0x05f8 },
- { 0x06, 0xf9fa },
- { 0x06, 0xfbef },
- { 0x06, 0x79e2 },
- { 0x06, 0x835f },
- { 0x06, 0xe0f8 },
- { 0x06, 0x9ae1 },
- { 0x06, 0xf89b },
- { 0x06, 0xef31 },
- { 0x06, 0x3b65 },
- { 0x06, 0xaa07 },
- { 0x06, 0x81e4 },
- { 0x06, 0xf89a },
- { 0x06, 0xe5f8 },
- { 0x06, 0x9baf },
- { 0x06, 0x06ae },
- { 0x05, 0x83dc },
- { 0x06, 0x8300 },
+ { 0x05, 0x8330 },
+ { 0x06, 0x2642 }
};
- rtl_phy_write(ioaddr, phy_reg_init_1,
- ARRAY_SIZE(phy_reg_init_1));
+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
- mdio_write(ioaddr, 0x1f, 0x0000);
+ mdio_write(ioaddr, 0x1f, 0x0002);
+ mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600);
+ mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000);
+
+ mdio_write(ioaddr, 0x1f, 0x0001);
+ mdio_write(ioaddr, 0x17, 0x0cc0);
+
+ mdio_write(ioaddr, 0x1f, 0x0002);
+ mdio_patch(ioaddr, 0x0f, 0x0017);
+
+ rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
+}
+
+static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
+{
+ struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0002 },
+ { 0x10, 0x0008 },
+ { 0x0d, 0x006c },
+
+ { 0x1f, 0x0000 },
+ { 0x0d, 0xf880 },
+
+ { 0x1f, 0x0001 },
+ { 0x17, 0x0cc0 },
+
+ { 0x1f, 0x0001 },
+ { 0x0b, 0xa4d8 },
+ { 0x09, 0x281c },
+ { 0x07, 0x2883 },
+ { 0x0a, 0x6b35 },
+ { 0x1d, 0x3da4 },
+ { 0x1c, 0xeffd },
+ { 0x14, 0x7f52 },
+ { 0x18, 0x7fc6 },
+ { 0x08, 0x0601 },
+ { 0x06, 0x4063 },
+ { 0x10, 0xf074 },
+ { 0x1f, 0x0003 },
+ { 0x13, 0x0789 },
+ { 0x12, 0xf4bd },
+ { 0x1a, 0x04fd },
+ { 0x14, 0x84b0 },
+ { 0x1f, 0x0000 },
+ { 0x00, 0x9200 },
+
+ { 0x1f, 0x0005 },
+ { 0x01, 0x0340 },
+ { 0x1f, 0x0001 },
+ { 0x04, 0x4000 },
+ { 0x03, 0x1d21 },
+ { 0x02, 0x0c32 },
+ { 0x01, 0x0200 },
+ { 0x00, 0x5554 },
+ { 0x04, 0x4800 },
+ { 0x04, 0x4000 },
+ { 0x04, 0xf000 },
+ { 0x03, 0xdf01 },
+ { 0x02, 0xdf20 },
+ { 0x01, 0x101a },
+ { 0x00, 0xa0ff },
+ { 0x04, 0xf800 },
+ { 0x04, 0xf000 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x0023 },
+ { 0x16, 0x0000 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
@@ -1792,7 +2661,13 @@ static void rtl_hw_phy_config(struct net_device *dev)
rtl8168cp_2_hw_phy_config(ioaddr);
break;
case RTL_GIGA_MAC_VER_25:
- rtl8168d_hw_phy_config(ioaddr);
+ rtl8168d_1_hw_phy_config(ioaddr);
+ break;
+ case RTL_GIGA_MAC_VER_26:
+ rtl8168d_2_hw_phy_config(ioaddr);
+ break;
+ case RTL_GIGA_MAC_VER_27:
+ rtl8168d_3_hw_phy_config(ioaddr);
break;
default:
@@ -2863,6 +3738,8 @@ static void rtl_hw_start_8168(struct net_device *dev)
break;
case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
rtl_hw_start_8168d(ioaddr, pdev);
break;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index ee366c5a8fa..c9c70ab0cce 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -36,6 +36,7 @@ static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n";
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8f..cc4b2f99989 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
int count;
int cpu;
- if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+ if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
printk(KERN_WARNING
"sfc: RSS disabled due to allocation failure\n");
return 1;
}
- cpumask_clear(core_mask);
count = 0;
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ecf3279fbef..f4dfd1f679a 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -826,7 +826,7 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
- .remove = __devexit_p(sgiseeq_remove),
+ .remove = __exit_p(sgiseeq_remove),
.driver = {
.name = "sgiseeq",
.owner = THIS_MODULE,
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 97949d0a699..c072f7f36ac 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -52,6 +52,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 38a508b4aad..b27156eaf26 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -73,6 +73,7 @@ static const char * const boot_msg =
/* Include files */
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 55bad408196..8f5414348e8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -37,6 +37,7 @@
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/mii.h>
#include <asm/irq.h>
@@ -3935,11 +3936,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
#endif
err = -ENOMEM;
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ /* space for skge@pci:0000:04:00.0 */
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
if (!hw) {
dev_err(&pdev->dev, "cannot allocate hardware struct\n");
goto err_out_free_regions;
}
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
@@ -3974,7 +3978,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
goto err_out_free_netdev;
}
- err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+ err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, hw->irq_name, hw);
if (err) {
dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
dev->name, pdev->irq);
@@ -3982,14 +3986,17 @@ static int __devinit skge_probe(struct pci_dev *pdev,
}
skge_show_addr(dev);
- if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
- if (register_netdev(dev1) == 0)
+ if (hw->ports > 1) {
+ dev1 = skge_devinit(hw, 1, using_dac);
+ if (dev1 && register_netdev(dev1) == 0)
skge_show_addr(dev1);
else {
/* Failure to register second port need not be fatal */
dev_warn(&pdev->dev, "register of second port failed\n");
hw->dev[1] = NULL;
- free_netdev(dev1);
+ hw->ports = 1;
+ if (dev1)
+ free_netdev(dev1);
}
}
pci_set_drvdata(pdev, hw);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 17caccbb768..831de1b6e96 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2423,6 +2423,8 @@ struct skge_hw {
u16 phy_addr;
spinlock_t phy_lock;
struct tasklet_struct phy_task;
+
+ char irq_name[0]; /* skge@pci:0000:04:00.0 */
};
enum pause_control {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 15140f9f2e9..2ab5c39f33c 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1497,7 +1497,6 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
- hw->flags |= SKY2_HW_RAM_BUFFER;
pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
@@ -2926,6 +2925,9 @@ static int __devinit sky2_init(struct sky2_hw *hw)
++hw->ports;
}
+ if (sky2_read8(hw, B2_E_0))
+ hw->flags |= SKY2_HW_RAM_BUFFER;
+
return 0;
}
@@ -4485,13 +4487,16 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
err = -ENOMEM;
- hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+
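+ /* Reserve room after the hw struct for the DRV_NAME "@pci:<bus id>" irq name. */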
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ + strlen(pci_name(pdev)) + 1, GFP_KERNEL);
if (!hw) {
dev_err(&pdev->dev, "cannot allocate hardware struct\n");
goto err_out_free_regions;
}
hw->pdev = pdev;
+ sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
@@ -4537,7 +4542,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
err = request_irq(pdev->irq, sky2_intr,
(hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
- dev->name, hw);
+ hw->irq_name, hw);
if (err) {
dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
goto err_out_unregister;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index e0f23a10104..ed54129698b 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2085,6 +2085,8 @@ struct sky2_hw {
struct timer_list watchdog_timer;
struct work_struct restart_work;
wait_queue_head_t msi_wait;
+
+ char irq_name[0];
};
static inline int sky2_is_copper(const struct sky2_hw *hw)
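
The skge and sky2 hunks above grow the adapter struct by a zero-length irq_name[] member and over-allocate it so that the formatted "<driver>@pci:<bus id>" string lives in the same allocation; request_irq() is then passed hw->irq_name, a name tied to the PCI slot, instead of the first port's dev->name. A minimal user-space sketch of the same pattern (calloc and a literal bus id stand in for kzalloc and pci_name(), so this is an illustration, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DRV_NAME "skge"

struct hw_model {
        int ports;
        char irq_name[];        /* flexible array, like irq_name[0] above */
};

static struct hw_model *hw_alloc(const char *bus_id)
{
        /* room for "skge@pci:" + "0000:04:00.0" + '\0' */
        size_t extra = strlen(DRV_NAME "@pci:") + strlen(bus_id) + 1;
        struct hw_model *hw = calloc(1, sizeof(*hw) + extra);

        if (hw)
                sprintf(hw->irq_name, DRV_NAME "@pci:%s", bus_id);
        return hw;
}

int main(void)
{
        struct hw_model *hw = hw_alloc("0000:04:00.0");

        if (!hw)
                return 1;
        printf("IRQ name: %s\n", hw->irq_name);
        free(hw);
        return 0;
}
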
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index e17c535a577..fe3cebb984d 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -67,6 +67,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
new file mode 100644
index 00000000000..35eaa5251d7
--- /dev/null
+++ b/drivers/net/stmmac/Kconfig
@@ -0,0 +1,53 @@
+config STMMAC_ETH
+ tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+ select MII
+ select PHYLIB
+ depends on NETDEVICES && CPU_SUBTYPE_ST40
+ help
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
+ controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+
+if STMMAC_ETH
+
+config STMMAC_DA
+ bool "STMMAC DMA arbitration scheme"
+ default n
+ help
+ When this option is selected, RX has priority over TX (only on the
+ Gigabit Ethernet device).
+ By default, the DMA arbitration scheme is round-robin
+ (rx:tx priority is 1:1).
+
+config STMMAC_DUAL_MAC
+ bool "STMMAC: dual mac support (EXPERIMENTAL)"
+ default n
+ depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
+ help
+ Some ST SoCs (for example the stx7141 and stx7200c2) have two
+ Ethernet controllers. This option enables the second Ethernet
+ device on these platforms.
+
+config STMMAC_TIMER
+ bool "STMMAC Timer optimisation"
+ default n
+ help
+ Use an external timer to mitigate the number of network
+ interrupts.
+
+choice
+ prompt "Select Timer device"
+ depends on STMMAC_TIMER
+
+config STMMAC_TMU_TIMER
+ bool "TMU channel 2"
+ depends on CPU_SH4
+ help
+
+config STMMAC_RTC_TIMER
+ bool "Real time clock"
+ depends on RTC_CLASS
+ help
+
+endchoice
+
+endif
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
new file mode 100644
index 00000000000..b2d7a5564df
--- /dev/null
+++ b/drivers/net/stmmac/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ mac100.o gmac.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
new file mode 100644
index 00000000000..e49e5188e88
--- /dev/null
+++ b/drivers/net/stmmac/common.h
@@ -0,0 +1,330 @@
+/*******************************************************************************
+ STMMAC Common Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "descs.h"
+#include <linux/io.h>
+
+/* *********************************************
+ DMA CRS Control and Status Register Mapping
+ * *********************************************/
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* ********************************
+ DMA Control register defines
+ * ********************************/
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* **************************************
+ DMA Interrupt Enable register defines
+ * **************************************/
+/**** NORMAL INTERRUPT ****/
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/**** ABNORMAL INTERRUPT ****/
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* ****************************
+ * DMA Status register defines
+ * ****************************/
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+/* Other defines */
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+/* DMA STORE-AND-FORWARD Operation Mode */
+#define SF_DMA_MODE 1
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
+
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
+
+struct stmmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long tx_heartbeat;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long rx_partial;
+ unsigned long rx_runt;
+ unsigned long rx_toolong;
+ unsigned long rx_collision;
+ unsigned long rx_crc;
+ unsigned long rx_lenght;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ unsigned long rx_vlan;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Extra info */
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long poll_n;
+ unsigned long sched_timer_n;
+ unsigned long normal_irq_n;
+};
+
+/* GMAC core can compute the checksums in HW. */
+enum rx_frame_status {
+ good_frame = 0,
+ discard_frame = 1,
+ csum_none = 2,
+};
+
+static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+static inline void stmmac_get_mac_addr(unsigned long ioaddr,
+ unsigned char *addr, unsigned int high,
+ unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* DMA core initialization */
+ int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump MAC registers */
+ void (*dump_mac_regs) (unsigned long ioaddr);
+ /* Dump DMA registers */
+ void (*dump_dma_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ /* RX descriptor ring initialization */
+ void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic);
+ /* TX descriptor ring initialization */
+ void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+ int csum_flag);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner) (struct dma_desc *p);
+ int (*get_tx_owner) (struct dma_desc *p);
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc) (struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc) (struct dma_desc *p);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted */
+ void (*clear_tx_ic) (struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls) (struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len) (struct dma_desc *p);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
+ int (*get_rx_owner) (struct dma_desc *p);
+ void (*set_rx_owner) (struct dma_desc *p);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len) (struct dma_desc *p);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+ /* Multicast filter setting */
+ void (*set_filter) (struct net_device *dev);
+ /* Flow control setting */
+ void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt) (unsigned long ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct hw_cap {
+ unsigned int version; /* Core Version register (GMAC) */
+ unsigned int pmt; /* Power-Down mode (GMAC) */
+ struct mac_link link;
+ struct mii_regs mii;
+};
+
+struct mac_device_info {
+ struct hw_cap hw;
+ struct stmmac_ops *ops;
+};
+
+struct mac_device_info *gmac_setup(unsigned long addr);
+struct mac_device_info *mac100_setup(unsigned long addr);
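
stmmac_set_mac_addr() and stmmac_get_mac_addr() above split a six-byte MAC address across two MMIO words: the "high" register carries addr[4]/addr[5] in its low 16 bits and the "low" register carries addr[0]..addr[3]. A stand-alone round-trip of that packing, with plain variables in place of the writel()/readl() targets (a sketch, not driver code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Pack as in stmmac_set_mac_addr(): hi <- addr[5..4], lo <- addr[3..0]. */
static void mac_to_words(const uint8_t addr[6], uint32_t *hi, uint32_t *lo)
{
        *hi = (addr[5] << 8) | addr[4];
        *lo = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
              (addr[1] << 8) | addr[0];
}

/* Unpack as in stmmac_get_mac_addr(). */
static void words_to_mac(uint32_t hi, uint32_t lo, uint8_t addr[6])
{
        addr[0] = lo & 0xff;
        addr[1] = (lo >> 8) & 0xff;
        addr[2] = (lo >> 16) & 0xff;
        addr[3] = (lo >> 24) & 0xff;
        addr[4] = hi & 0xff;
        addr[5] = (hi >> 8) & 0xff;
}

int main(void)
{
        const uint8_t in[6] = { 0x00, 0x80, 0xe1, 0x12, 0x34, 0x56 };
        uint8_t out[6];
        uint32_t hi, lo;

        mac_to_words(in, &hi, &lo);
        words_to_mac(hi, lo, out);
        printf("hi=0x%08x lo=0x%08x round-trip %s\n",
               (unsigned int)hi, (unsigned int)lo,
               memcmp(in, out, 6) ? "BROKEN" : "ok");
        return 0;
}
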
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
new file mode 100644
index 00000000000..6d2a0b2f5e5
--- /dev/null
+++ b/drivers/net/stmmac/descs.h
@@ -0,0 +1,163 @@
+/*******************************************************************************
+ Header File to describe the DMA descriptors
+ Use enhanced descriptors in case of GMAC Cores.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+struct dma_desc {
+ /* Receive descriptor */
+ union {
+ struct {
+ /* RDES0 */
+ u32 reserved1:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 mii_error:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 collision:1;
+ u32 frame_too_long:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 multicast_frame:1;
+ u32 run_frame:1;
+ u32 length_error:1;
+ u32 partial_frame_error:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 filtering_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved2:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 reserved3:5;
+ u32 disable_ic:1;
+ } rx;
+ struct {
+ /* RDES0 */
+ u32 payload_csum_error:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 error_gmii:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 late_collision:1;
+ u32 ipc_csum_error:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 vlan_tag:1;
+ u32 overflow_error:1;
+ u32 length_error:1;
+ u32 sa_filter_fail:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 da_filter_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:13;
+ u32 reserved1:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 buffer2_size:13;
+ u32 reserved2:2;
+ u32 disable_ic:1;
+ } erx; /* -- enhanced -- */
+
+ /* Transmit descriptor */
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 heartbeat_fail:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 reserved1:3;
+ u32 error_summary:1;
+ u32 reserved2:15;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved3:1;
+ u32 disable_padding:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 crc_disable:1;
+ u32 reserved4:2;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ } tx;
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 vlan_frame:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 payload_error:1;
+ u32 frame_flushed:1;
+ u32 jabber_timeout:1;
+ u32 error_summary:1;
+ u32 ip_header_error:1;
+ u32 time_stamp_status:1;
+ u32 reserved1:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 checksum_insertion:2;
+ u32 reserved2:1;
+ u32 time_stamp_enable:1;
+ u32 disable_padding:1;
+ u32 crc_disable:1;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:13;
+ u32 reserved3:3;
+ u32 buffer2_size:13;
+ u32 reserved4:3;
+ } etx; /* -- enhanced -- */
+ } des01;
+ unsigned int des2;
+ unsigned int des3;
+};
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+ cic_disabled = 0, /* Checksum Insertion Control */
+ cic_only_ip = 1, /* Only IP header */
+ cic_no_pseudoheader = 2, /* IP header but pseudoheader
+ * is not calculated */
+ cic_full = 3, /* IP header and pseudoheader */
+};
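
descs.h above overlays C bit-fields on the 32-bit descriptor words the DMA engine expects, which only works if the compiler packs the first-declared field into the least significant bits; GCC does that on little-endian builds, and that is the assumption behind this sketch. A reduced model of RDES0 that makes the layout visible:

#include <stdio.h>
#include <stdint.h>

/*
 * Cut-down RDES0 from the rx descriptor above: the 16 single-bit status
 * flags are collapsed into one field, the rest keeps the same order, so
 * frame_length should land in bits 16..29 and own in bit 31.
 */
union rdes0_model {
        struct {
                uint32_t status_flags:16;
                uint32_t frame_length:14;
                uint32_t filtering_fail:1;
                uint32_t own:1;
        } rx;
        uint32_t word;
};

int main(void)
{
        union rdes0_model d = { .word = 0 };

        d.rx.frame_length = 1514;   /* full-size Ethernet frame */
        d.rx.own = 1;               /* descriptor handed to the DMA */

        /* Expect 0x85ea0000 with GCC on a little-endian target. */
        printf("raw RDES0 word: 0x%08x\n", (unsigned int)d.word);
        return 0;
}
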
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c
new file mode 100644
index 00000000000..b624bb5bae0
--- /dev/null
+++ b/drivers/net/stmmac/gmac.c
@@ -0,0 +1,693 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+#include "gmac.h"
+
+#undef GMAC_DEBUG
+/*#define GMAC_DEBUG*/
+#undef FRAME_FILTER_DEBUG
+/*#define FRAME_FILTER_DEBUG*/
+#ifdef GMAC_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+static void gmac_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\t----------------------------------------------\n"
+ "\t GMAC registers (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
+ ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+ (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+#ifdef CONFIG_STMMAC_DA
+ value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
+#endif
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Transmit FIFO flush operation */
+static void gmac_flush_tx_fifo(unsigned long ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode == SF_DMA_MODE) {
+ DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ csr6 |= DMA_CONTROL_TSF;
+ /* Operating on second frame increases the performance
+ * especially when transmit store-and-forward is used. */
+ csr6 |= DMA_CONTROL_OSF;
+ } else {
+ DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
+ " (threshold = %d)\n", txmode);
+ csr6 &= ~DMA_CONTROL_TSF;
+ csr6 &= DMA_CONTROL_TC_TX_MASK;
+ /* Set the transmit threshold */
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else if (txmode <= 128)
+ csr6 |= DMA_CONTROL_TTC_128;
+ else if (txmode <= 192)
+ csr6 |= DMA_CONTROL_TTC_192;
+ else
+ csr6 |= DMA_CONTROL_TTC_256;
+ }
+
+ if (rxmode == SF_DMA_MODE) {
+ DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
+ " (threshold = %d)\n", rxmode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (rxmode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (rxmode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (rxmode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+ return;
+}
+
+/* Not yet implemented --- no RMON module */
+static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ return;
+}
+
+static void gmac_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info(" DMA registers\n");
+ for (i = 0; i < 22; i++) {
+ if ((i < 9) || (i > 17)) {
+ int offset = i * 4;
+ pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + offset),
+ readl(ioaddr + DMA_BUS_MODE + offset));
+ }
+ }
+ return;
+}
+
+static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ gmac_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ DBG(KERN_ERR "\tunderflow error\n");
+ gmac_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ gmac_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+static int gmac_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 No CSUM errorS.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed.. no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = good_frame;
+ } else if (status == 0x4) {
+ DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.erx.error_summary)) {
+ DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
+ if (unlikely(p->des01.erx.descriptor_error)) {
+ DBG(KERN_ERR "\tdescriptor error\n");
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(p->des01.erx.overflow_error)) {
+ DBG(KERN_ERR "\toverflow error\n");
+ x->rx_gmac_overflow++;
+ }
+
+ if (unlikely(p->des01.erx.ipc_csum_error))
+ DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(p->des01.erx.late_collision)) {
+ DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.erx.receive_watchdog)) {
+ DBG(KERN_ERR "\treceive_watchdog error\n");
+ x->rx_watchdog++;
+ }
+ if (unlikely(p->des01.erx.error_gmii)) {
+ DBG(KERN_ERR "\tReceive Error\n");
+ x->rx_mii++;
+ }
+ if (unlikely(p->des01.erx.crc_error)) {
+ DBG(KERN_ERR "\tCRC error\n");
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set.
+ * It does not match the information reported in the databook.
+ * At any rate, we need to understand if the CSUM hw computation is ok
+ * and report this info to the upper layers. */
+ ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+
+ if (unlikely(p->des01.erx.dribbling)) {
+ DBG(KERN_ERR "GMAC RX: dribbling error\n");
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.sa_filter_fail)) {
+ DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.da_filter_fail)) {
+ DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.erx.length_error)) {
+ DBG(KERN_ERR "GMAC RX: length_error error\n");
+ x->rx_lenght++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.erx.vlan_tag) {
+ DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+ x->rx_vlan++;
+ }
+#endif
+ return ret;
+}
+
+static void gmac_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+static void gmac_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void gmac_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, dev->mc_count, dev->uc_count);
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((dev->mc_count > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (dev->mc_count > 0) {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+ index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
+ /* Switch to promiscuous mode if more than 16 addrs
+ are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int i;
+ struct dev_addr_list *uc_ptr = dev->uc_list;
+
+ for (i = 0; i < dev->uc_count; i++) {
+ gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
+ i + 1);
+
+ DBG(KERN_INFO "\t%d "
+ "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
+ "%02x\n", i + 1,
+ uc_ptr->da_addr[0], uc_ptr->da_addr[1],
+ uc_ptr->da_addr[2], uc_ptr->da_addr[3],
+ uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
+ uc_ptr = uc_ptr->next;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
+
+static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ /* To support jumbo frames */
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.erx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
+ p++;
+ }
+ return;
+}
+
+static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+
+ for (i = 0; i < ring_size; i++) {
+ p->des01.etx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.etx.end_ring = 1;
+ p++;
+ }
+
+ return;
+}
+
+static int gmac_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.etx.own;
+}
+
+static int gmac_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.erx.own;
+}
+
+static void gmac_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.etx.own = 1;
+}
+
+static void gmac_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.erx.own = 1;
+}
+
+static int gmac_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.etx.last_segment;
+}
+
+static void gmac_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.etx.end_ring;
+
+ memset(p, 0, sizeof(struct dma_desc));
+ p->des01.etx.end_ring = ter;
+
+ return;
+}
+
+static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.etx.first_segment = is_fs;
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else {
+ p->des01.etx.buffer1_size = len;
+ }
+ if (likely(csum_flag))
+ p->des01.etx.checksum_insertion = cic_full;
+}
+
+static void gmac_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.etx.interrupt = 0;
+}
+
+static void gmac_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.etx.last_segment = 1;
+ p->des01.etx.interrupt = 1;
+}
+
+static int gmac_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.erx.frame_length;
+}
+
+struct stmmac_ops gmac_driver = {
+ .core_init = gmac_core_init,
+ .dump_mac_regs = gmac_dump_regs,
+ .dma_init = gmac_dma_init,
+ .dump_dma_regs = gmac_dump_dma_regs,
+ .dma_mode = gmac_dma_operation_mode,
+ .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
+ .tx_status = gmac_get_tx_frame_status,
+ .rx_status = gmac_get_rx_frame_status,
+ .get_tx_len = gmac_get_tx_len,
+ .set_filter = gmac_set_filter,
+ .flow_ctrl = gmac_flow_ctrl,
+ .pmt = gmac_pmt,
+ .init_rx_desc = gmac_init_rx_desc,
+ .init_tx_desc = gmac_init_tx_desc,
+ .get_tx_owner = gmac_get_tx_owner,
+ .get_rx_owner = gmac_get_rx_owner,
+ .release_tx_desc = gmac_release_tx_desc,
+ .prepare_tx_desc = gmac_prepare_tx_desc,
+ .clear_tx_ic = gmac_clear_tx_ic,
+ .close_tx_desc = gmac_close_tx_desc,
+ .get_tx_ls = gmac_get_tx_ls,
+ .set_tx_owner = gmac_set_tx_owner,
+ .set_rx_owner = gmac_set_rx_owner,
+ .get_rx_frame_len = gmac_get_rx_frame_len,
+ .host_irq_status = gmac_irq_status,
+ .set_umac_addr = gmac_set_umac_addr,
+ .get_umac_addr = gmac_get_umac_addr,
+};
+
+struct mac_device_info *gmac_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ mac->ops = &gmac_driver;
+ mac->hw.pmt = PMT_SUPPORTED;
+ mac->hw.link.port = GMAC_CONTROL_PS;
+ mac->hw.link.duplex = GMAC_CONTROL_DM;
+ mac->hw.link.speed = GMAC_CONTROL_FES;
+ mac->hw.mii.addr = GMAC_MII_ADDR;
+ mac->hw.mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
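
gmac_set_filter() above programs the 64-bin multicast hash: the bit-reversed, complemented CRC-32 of each address supplies a six-bit bin number, bit 5 of which picks GMAC_HASH_HIGH or GMAC_HASH_LOW while bits 0..4 pick the bit inside that register. A self-contained model of the computation, with local crc32_le()/bitrev32() replacements for the kernel helpers (same reflected polynomial, no final inversion), is sketched below:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Reflected CRC-32, polynomial 0xEDB88320, crc32_le()-style (no final XOR). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= p[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
        }
        return crc;
}

static uint32_t bitrev32(uint32_t x)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++, x >>= 1)
                r = (r << 1) | (x & 1u);
        return r;
}

int main(void)
{
        /* 01:00:5e:00:00:01 is the MAC for IPv4 all-hosts (224.0.0.1). */
        const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t hash[2] = { 0, 0 };  /* [0] -> GMAC_HASH_LOW, [1] -> GMAC_HASH_HIGH */
        int bit_nr;

        /* Same expression as in gmac_set_filter() above. */
        bit_nr = bitrev32(~crc32_le(~0u, mc, 6)) >> 26;
        hash[bit_nr >> 5] |= 1u << (bit_nr & 31);

        printf("bin %d -> HASH_%s bit %d (LOW=0x%08x HIGH=0x%08x)\n",
               bit_nr, (bit_nr >> 5) ? "HIGH" : "LOW", bit_nr & 31,
               (unsigned int)hash[0], (unsigned int)hash[1]);
        return 0;
}
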
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h
new file mode 100644
index 00000000000..684a363120a
--- /dev/null
+++ b/drivers/net/stmmac/gmac.h
@@ -0,0 +1,204 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+enum gmac_irq_status {
+ time_stamp_irq = 0x0200,
+ mmc_rx_csum_offload_irq = 0x0080,
+ mmc_tx_irq = 0x0040,
+ mmc_rx_irq = 0x0020,
+ mmc_irq = 0x0010,
+ pmt_irq = 0x0008,
+ pcs_ane_irq = 0x0004,
+ pcs_link_irq = 0x0002,
+ rgmii_irq = 0x0001,
+};
+#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+
+/* PMT Control and Status */
+#define GMAC_PMT 0x0000002c
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8))
+#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8))
+#define GMAC_MAX_UNICAST_ADDRESSES 16
+
+#define GMAC_AN_CTRL 0x000000c0 /* AN control */
+#define GMAC_AN_STATUS 0x000000c4 /* AN status */
+#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
+#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partner ability */
+#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
+#define GMAC_TBI 0x000000d4 /* TBI extend status */
+#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
+#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+enum inter_frame_gap {
+ GMAC_CONTROL_IFG_88 = 0x00040000,
+ GMAC_CONTROL_IFG_80 = 0x00020000,
+ GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */
+#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+ GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+/* Programmable burst length (passed through platform) */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+
+enum rx_tx_priority_ratio {
+ double_ratio = 0x00004000, /*2:1 */
+ triple_ratio = 0x00008000, /*3:1 */
+ quadruple_ratio = 0x0000c000, /*4:1 */
+};
+
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_4PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+ act_full_minus_1 = 0x00800000,
+ act_full_minus_2 = 0x00800200,
+ act_full_minus_3 = 0x00800400,
+ act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+ deac_full_minus_1 = 0x00400000,
+ deac_full_minus_2 = 0x00400800,
+ deac_full_minus_3 = 0x00401000,
+ deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+enum ttc_control {
+ DMA_CONTROL_TTC_64 = 0x00000000,
+ DMA_CONTROL_TTC_128 = 0x00004000,
+ DMA_CONTROL_TTC_192 = 0x00008000,
+ DMA_CONTROL_TTC_256 = 0x0000c000,
+ DMA_CONTROL_TTC_40 = 0x00010000,
+ DMA_CONTROL_TTC_32 = 0x00014000,
+ DMA_CONTROL_TTC_24 = 0x00018000,
+ DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
+
+#define DMA_CONTROL_EFC 0x00000100
+#define DMA_CONTROL_FEF 0x00000080
+#define DMA_CONTROL_FUF 0x00000040
+
+enum rtc_control {
+ DMA_CONTROL_RTC_64 = 0x00000000,
+ DMA_CONTROL_RTC_32 = 0x00000008,
+ DMA_CONTROL_RTC_96 = 0x00000010,
+ DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
+
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL 0x100
+#define GMAC_MMC_RX_INTR 0x104
+#define GMAC_MMC_TX_INTR 0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
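
common.h further up declares struct stmmac_ops as a table of function pointers plus the two constructors gmac_setup() and mac100_setup(); gmac.c above and mac100.c below each fill in one such table, and the core code (stmmac_main.c, listed in the Makefile) is expected to call only through mac->ops. A minimal model of that dispatch pattern, with made-up callbacks and a hypothetical register base standing in for the real ones:

#include <stdio.h>
#include <stdlib.h>

/* Reduced stand-ins for struct stmmac_ops and struct mac_device_info. */
struct ops_model {
        void (*core_init)(unsigned long ioaddr);
};

struct mac_model {
        int has_pmt;
        const struct ops_model *ops;
};

static void gmac_core_init_model(unsigned long ioaddr)
{
        printf("GMAC core init at 0x%lx\n", ioaddr);
}

static void mac100_core_init_model(unsigned long ioaddr)
{
        printf("MAC 10/100 core init at 0x%lx\n", ioaddr);
}

static const struct ops_model gmac_ops = { .core_init = gmac_core_init_model };
static const struct ops_model mac100_ops = { .core_init = mac100_core_init_model };

/* Pick one implementation, as gmac_setup()/mac100_setup() do for the real ops. */
static struct mac_model *mac_setup(int is_gmac)
{
        struct mac_model *mac = calloc(1, sizeof(*mac));

        if (!mac)
                return NULL;
        mac->ops = is_gmac ? &gmac_ops : &mac100_ops;
        mac->has_pmt = is_gmac;  /* only gmac_setup() reports PMT_SUPPORTED */
        return mac;
}

int main(void)
{
        struct mac_model *mac = mac_setup(1);

        if (!mac)
                return 1;
        mac->ops->core_init(0xfd110000UL);  /* hypothetical base address */
        free(mac);
        return 0;
}
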
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c
new file mode 100644
index 00000000000..625171b6062
--- /dev/null
+++ b/drivers/net/stmmac/mac100.c
@@ -0,0 +1,517 @@
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "common.h"
+#include "mac100.h"
+
+#undef MAC100_DEBUG
+/*#define MAC100_DEBUG*/
+#ifdef MAC100_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+static void mac100_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+ return;
+}
+
+static void mac100_dump_mac_regs(unsigned long ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t MAC100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+ pr_info("\n\tMAC management counter registers\n");
+ pr_info("\t MMC ctrl (offset 0x%x): 0x%08x\n",
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ return;
+}
+
+static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* The base address of the RX/TX descriptor lists must be written into
+ * DMA CSR3 and CSR4, respectively. */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all..
+ * The transmit threshold can be programmed by
+ * setting the TTC bits in the DMA control register.*/
+static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+static void mac100_dump_dma_regs(unsigned long ioaddr)
+{
+ int i;
+
+ DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+ return;
+}
+
+/* DMA controller has two counters to track the number of
+ the receive missed frames. */
+static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+ return;
+}
+
+static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.tx.error_summary)) {
+ if (unlikely(p->des01.tx.underflow_error)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(p->des01.tx.no_carrier)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.tx.loss_carrier)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((p->des01.tx.excessive_deferral) ||
+ (p->des01.tx.excessive_collisions) ||
+ (p->des01.tx.late_collision)))
+ stats->collisions += p->des01.tx.collision_count;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.heartbeat_fail)) {
+ x->tx_heartbeat++;
+ stats->tx_heartbeat_errors++;
+ ret = -1;
+ }
+ if (unlikely(p->des01.tx.deferred))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int mac100_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns csum_none because the device
+ * is not able to compute the csum in HW. */
+static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = csum_none;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ pr_warning("mac100 Error: Oversized Ethernet "
+ "frame spanned multiple buffers\n");
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(p->des01.rx.error_summary)) {
+ if (unlikely(p->des01.rx.descriptor_error))
+ x->rx_desc++;
+ if (unlikely(p->des01.rx.partial_frame_error))
+ x->rx_partial++;
+ if (unlikely(p->des01.rx.run_frame))
+ x->rx_runt++;
+ if (unlikely(p->des01.rx.frame_too_long))
+ x->rx_toolong++;
+ if (unlikely(p->des01.rx.collision)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(p->des01.rx.crc_error)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.dribbling))
+ ret = discard_frame;
+
+ if (unlikely(p->des01.rx.length_error)) {
+ x->rx_lenght++;
+ ret = discard_frame;
+ }
+ if (unlikely(p->des01.rx.mii_error)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+ if (p->des01.rx.multicast_frame) {
+ x->rx_multicast++;
+ stats->multicast++;
+ }
+ return ret;
+}
+
+static void mac100_irq_status(unsigned long ioaddr)
+{
+ return;
+}
+
+static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void mac100_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (dev->mc_count == 0) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Perfect filter mode for physical address and Hash
+ filter for multicast */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
+ | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table */
+ int bit_nr =
+ ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
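+ /* Illustrative example (not part of the original logic): a CRC
+ * whose upper 6 bits give bit_nr = 40 sets bit 8 (40 & 31) of
+ * mc_filter[1] (40 >> 5), i.e. a bit of MAC_HASH_HIGH below. */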
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
+ "HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+ return;
+}
+
+static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+
+ return;
+}
+
+/* No PMT module supported in our SoC for the Ethernet Controller. */
+static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ return;
+}
+
+static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+ int disable_rx_ic)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ if (i == ring_size - 1)
+ p->des01.rx.end_ring = 1;
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
+ p++;
+ }
+ return;
+}
+
+static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+{
+ int i;
+ for (i = 0; i < ring_size; i++) {
+ p->des01.tx.own = 0;
+ if (i == ring_size - 1)
+ p->des01.tx.end_ring = 1;
+ p++;
+ }
+ return;
+}
+
+static int mac100_get_tx_owner(struct dma_desc *p)
+{
+ return p->des01.tx.own;
+}
+
+static int mac100_get_rx_owner(struct dma_desc *p)
+{
+ return p->des01.rx.own;
+}
+
+static void mac100_set_tx_owner(struct dma_desc *p)
+{
+ p->des01.tx.own = 1;
+}
+
+static void mac100_set_rx_owner(struct dma_desc *p)
+{
+ p->des01.rx.own = 1;
+}
+
+static int mac100_get_tx_ls(struct dma_desc *p)
+{
+ return p->des01.tx.last_segment;
+}
+
+static void mac100_release_tx_desc(struct dma_desc *p)
+{
+ int ter = p->des01.tx.end_ring;
+
+ /* clean field used within the xmit */
+ p->des01.tx.first_segment = 0;
+ p->des01.tx.last_segment = 0;
+ p->des01.tx.buffer1_size = 0;
+
+ /* clean status reported */
+ p->des01.tx.error_summary = 0;
+ p->des01.tx.underflow_error = 0;
+ p->des01.tx.no_carrier = 0;
+ p->des01.tx.loss_carrier = 0;
+ p->des01.tx.excessive_deferral = 0;
+ p->des01.tx.excessive_collisions = 0;
+ p->des01.tx.late_collision = 0;
+ p->des01.tx.heartbeat_fail = 0;
+ p->des01.tx.deferred = 0;
+
+ /* set termination field */
+ p->des01.tx.end_ring = ter;
+
+ return;
+}
+
+static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ int csum_flag)
+{
+ p->des01.tx.first_segment = is_fs;
+ p->des01.tx.buffer1_size = len;
+}
+
+static void mac100_clear_tx_ic(struct dma_desc *p)
+{
+ p->des01.tx.interrupt = 0;
+}
+
+static void mac100_close_tx_desc(struct dma_desc *p)
+{
+ p->des01.tx.last_segment = 1;
+ p->des01.tx.interrupt = 1;
+}
+
+static int mac100_get_rx_frame_len(struct dma_desc *p)
+{
+ return p->des01.rx.frame_length;
+}
+
+struct stmmac_ops mac100_driver = {
+ .core_init = mac100_core_init,
+ .dump_mac_regs = mac100_dump_mac_regs,
+ .dma_init = mac100_dma_init,
+ .dump_dma_regs = mac100_dump_dma_regs,
+ .dma_mode = mac100_dma_operation_mode,
+ .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
+ .tx_status = mac100_get_tx_frame_status,
+ .rx_status = mac100_get_rx_frame_status,
+ .get_tx_len = mac100_get_tx_len,
+ .set_filter = mac100_set_filter,
+ .flow_ctrl = mac100_flow_ctrl,
+ .pmt = mac100_pmt,
+ .init_rx_desc = mac100_init_rx_desc,
+ .init_tx_desc = mac100_init_tx_desc,
+ .get_tx_owner = mac100_get_tx_owner,
+ .get_rx_owner = mac100_get_rx_owner,
+ .release_tx_desc = mac100_release_tx_desc,
+ .prepare_tx_desc = mac100_prepare_tx_desc,
+ .clear_tx_ic = mac100_clear_tx_ic,
+ .close_tx_desc = mac100_close_tx_desc,
+ .get_tx_ls = mac100_get_tx_ls,
+ .set_tx_owner = mac100_set_tx_owner,
+ .set_rx_owner = mac100_set_rx_owner,
+ .get_rx_frame_len = mac100_get_rx_frame_len,
+ .host_irq_status = mac100_irq_status,
+ .set_umac_addr = mac100_set_umac_addr,
+ .get_umac_addr = mac100_get_umac_addr,
+};
+
+struct mac_device_info *mac100_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ pr_info("\tMAC 10/100\n");
+
+ mac->ops = &mac100_driver;
+ mac->hw.pmt = PMT_NOT_SUPPORTED;
+ mac->hw.link.port = MAC_CONTROL_PS;
+ mac->hw.link.duplex = MAC_CONTROL_F;
+ mac->hw.link.speed = 0;
+ mac->hw.mii.addr = MAC_MII_ADDR;
+ mac->hw.mii.data = MAC_MII_DATA;
+
+ return mac;
+}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h
new file mode 100644
index 00000000000..0f8f110d004
--- /dev/null
+++ b/drivers/net/stmmac/mac100.h
@@ -0,0 +1,116 @@
+/*******************************************************************************
+ MAC 10/100 Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/*----------------------------------------------------------------------------
+ * MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL 0x00000000 /* MAC Control */
+#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
+#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
+#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
+#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
+#define MAC_MII_ADDR 0x00000014 /* MII Address */
+#define MAC_MII_DATA 0x00000018 /* MII Data */
+#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
+#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
+#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
+#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
+#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
+#define MAC_CONTROL_PS 0x08000000 /* Port Select */
+#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
+#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
+#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
+#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
+#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
+#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
+#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
+#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
+#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
+#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
+#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
+#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
+#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
+#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT 16
+#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
+#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
+
+/* MII ADDR defines */
+#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DEFAULT 0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+ DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
+ DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
+ DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
+ DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
+ DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
+ DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
+ DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
+ DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
+};
+
+/* STMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Counter Overflow */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
new file mode 100644
index 00000000000..6d2eae3040e
--- /dev/null
+++ b/drivers/net/stmmac/stmmac.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#define DRV_MODULE_VERSION "Oct_09"
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "common.h"
+#ifdef CONFIG_STMMAC_TIMER
+#include "stmmac_timer.h"
+#endif
+
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
+ struct dma_desc *dma_tx ____cacheline_aligned;
+ dma_addr_t dma_tx_phy;
+ struct sk_buff **tx_skbuff;
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ unsigned int dma_tx_size;
+ int tx_coe;
+ int tx_coalesce;
+
+ struct dma_desc *dma_rx;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *rx_skbuff_dma;
+ struct sk_buff_head rx_recycle;
+
+ struct net_device *dev;
+ int is_gmac;
+ dma_addr_t dma_rx_phy;
+ unsigned int dma_rx_size;
+ int rx_csum;
+ unsigned int dma_buf_sz;
+ struct device *device;
+ struct mac_device_info *mac_type;
+
+ struct stmmac_extra_stats xstats;
+ struct napi_struct napi;
+
+ phy_interface_t phy_interface;
+ int pbl;
+ int bus_id;
+ int phy_addr;
+ int phy_mask;
+ int (*phy_reset) (void *priv);
+ void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void *bsp_priv;
+
+ int phy_irq;
+ struct phy_device *phydev;
+ int oldlink;
+ int speed;
+ int oldduplex;
+ unsigned int flow_ctrl;
+ unsigned int pause;
+ struct mii_bus *mii;
+
+ u32 msg_enable;
+ spinlock_t lock;
+ int wolopts;
+ int wolenabled;
+ int shutdown;
+#ifdef CONFIG_STMMAC_TIMER
+ struct stmmac_timer *tm;
+#endif
+#ifdef STMMAC_VLAN_TAG_USED
+ struct vlan_group *vlgrp;
+#endif
+};
+
+extern int stmmac_mdio_unregister(struct net_device *ndev);
+extern int stmmac_mdio_register(struct net_device *ndev);
+extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
new file mode 100644
index 00000000000..694ebe6a075
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -0,0 +1,395 @@
+/*******************************************************************************
+ STMMAC Ethtool support
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+
+#define REG_SPACE_SIZE 0x1054
+#define MAC100_ETHTOOL_NAME "st_mac100"
+#define GMAC_ETHTOOL_NAME "st_gmac"
+
+struct stmmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define STMMAC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
+ offsetof(struct stmmac_priv, xstats.m)}
+
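+/* Each entry records the stat name, its size within stmmac_extra_stats and
+ * its byte offset inside struct stmmac_priv (through the xstats member), so
+ * that stmmac_get_ethtool_stats() can copy the values out generically. */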
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ STMMAC_STAT(tx_underflow),
+ STMMAC_STAT(tx_carrier),
+ STMMAC_STAT(tx_losscarrier),
+ STMMAC_STAT(tx_heartbeat),
+ STMMAC_STAT(tx_deferred),
+ STMMAC_STAT(tx_vlan),
+ STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(tx_jabber),
+ STMMAC_STAT(tx_frame_flushed),
+ STMMAC_STAT(tx_payload_error),
+ STMMAC_STAT(tx_ip_header_error),
+ STMMAC_STAT(rx_desc),
+ STMMAC_STAT(rx_partial),
+ STMMAC_STAT(rx_runt),
+ STMMAC_STAT(rx_toolong),
+ STMMAC_STAT(rx_collision),
+ STMMAC_STAT(rx_crc),
+ STMMAC_STAT(rx_lenght),
+ STMMAC_STAT(rx_mii),
+ STMMAC_STAT(rx_multicast),
+ STMMAC_STAT(rx_gmac_overflow),
+ STMMAC_STAT(rx_watchdog),
+ STMMAC_STAT(da_rx_filter_fail),
+ STMMAC_STAT(sa_rx_filter_fail),
+ STMMAC_STAT(rx_missed_cntr),
+ STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(tx_undeflow_irq),
+ STMMAC_STAT(tx_process_stopped_irq),
+ STMMAC_STAT(tx_jabber_irq),
+ STMMAC_STAT(rx_overflow_irq),
+ STMMAC_STAT(rx_buf_unav_irq),
+ STMMAC_STAT(rx_process_stopped_irq),
+ STMMAC_STAT(rx_watchdog_irq),
+ STMMAC_STAT(tx_early_irq),
+ STMMAC_STAT(fatal_bus_error_irq),
+ STMMAC_STAT(threshold),
+ STMMAC_STAT(tx_pkt_n),
+ STMMAC_STAT(rx_pkt_n),
+ STMMAC_STAT(poll_n),
+ STMMAC_STAT(sched_timer_n),
+ STMMAC_STAT(normal_irq_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->is_gmac)
+ strcpy(info->driver, MAC100_ETHTOOL_NAME);
+ else
+ strcpy(info->driver, GMAC_ETHTOOL_NAME);
+
+ strcpy(info->version, DRV_MODULE_VERSION);
+ info->fw_version[0] = '\0';
+ info->n_stats = STMMAC_STATS_LEN;
+ return;
+}
+
+int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+ if (phy == NULL) {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, dev->name);
+ return -ENODEV;
+ }
+ if (!netif_running(dev)) {
+ pr_err("%s: interface is disabled: we cannot track "
+ "link speed / duplex setting\n", dev->name);
+ return -EBUSY;
+ }
+ cmd->transceiver = XCVR_INTERNAL;
+ spin_lock_irq(&priv->lock);
+ rc = phy_ethtool_gset(phy, cmd);
+ spin_unlock_irq(&priv->lock);
+ return rc;
+}
+
+int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+
+ spin_lock(&priv->lock);
+ rc = phy_ethtool_sset(phy, cmd);
+ spin_unlock(&priv->lock);
+
+ return rc;
+}
+
+u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+int stmmac_check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EBUSY;
+ return 0;
+}
+
+int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+void stmmac_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u32 *reg_space = (u32 *) space;
+
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ if (!priv->is_gmac) {
+ /* MAC registers */
+ for (i = 0; i < 12; i++)
+ reg_space[i] = readl(dev->base_addr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 9; i++)
+ reg_space[i + 12] =
+ readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR);
+ } else {
+ /* MAC registers */
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(dev->base_addr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] =
+ readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
+ }
+
+ return;
+}
+
+int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ if (data)
+ netdev->features |= NETIF_F_HW_CSUM;
+ else
+ netdev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return priv->rx_csum;
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ pause->autoneg = priv->phydev->autoneg;
+
+ if (priv->flow_ctrl & FLOW_RX)
+ pause->rx_pause = 1;
+ if (priv->flow_ctrl & FLOW_TX)
+ pause->tx_pause = 1;
+
+ spin_unlock(&priv->lock);
+ return;
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phydev;
+ int new_pause = FLOW_OFF;
+ int ret = 0;
+
+ spin_lock(&priv->lock);
+
+ if (pause->rx_pause)
+ new_pause |= FLOW_RX;
+ if (pause->tx_pause)
+ new_pause |= FLOW_TX;
+
+ priv->flow_ctrl = new_pause;
+
+ if (phy->autoneg) {
+ if (netif_running(netdev)) {
+ struct ethtool_cmd cmd;
+ /* auto-negotiation automatically restarted */
+ cmd.cmd = ETHTOOL_NWAY_RST;
+ cmd.supported = phy->supported;
+ cmd.advertising = phy->advertising;
+ cmd.autoneg = phy->autoneg;
+ cmd.speed = phy->speed;
+ cmd.duplex = phy->duplex;
+ cmd.phy_address = phy->addr;
+ ret = phy_ethtool_sset(phy, &cmd);
+ }
+ } else {
+ unsigned long ioaddr = netdev->base_addr;
+ priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
+ }
+ spin_unlock(&priv->lock);
+ return ret;
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+
+ /* Update HW stats if supported */
+ priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
+ ioaddr);
+
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+ data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+
+ return;
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return STMMAC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return;
+}
+
+/* Currently, WoL is supported only through Magic Packet. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (priv->wolenabled == PMT_SUPPORTED) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = priv->wolopts;
+ }
+ spin_unlock_irq(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC;
+
+ if (priv->wolenabled == PMT_NOT_SUPPORTED)
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts == 0)
+ device_set_wakeup_enable(priv->device, 0);
+ else
+ device_set_wakeup_enable(priv->device, 1);
+
+ spin_lock_irq(&priv->lock);
+ priv->wolopts = wol->wolopts;
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+static struct ethtool_ops stmmac_ethtool_ops = {
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_settings = stmmac_ethtool_getsettings,
+ .set_settings = stmmac_ethtool_setsettings,
+ .get_msglevel = stmmac_ethtool_getmsglevel,
+ .set_msglevel = stmmac_ethtool_setmsglevel,
+ .get_regs = stmmac_ethtool_gregs,
+ .get_regs_len = stmmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = stmmac_ethtool_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = stmmac_ethtool_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_pauseparam = stmmac_get_pauseparam,
+ .set_pauseparam = stmmac_set_pauseparam,
+ .get_ethtool_stats = stmmac_get_ethtool_stats,
+ .get_strings = stmmac_get_strings,
+ .get_wol = stmmac_get_wol,
+ .set_wol = stmmac_set_wol,
+ .get_sset_count = stmmac_get_sset_count,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+#endif
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
new file mode 100644
index 00000000000..c2f14dc9ba2
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -0,0 +1,2204 @@
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/stm/soc.h>
+#include "stmmac.h"
+
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define PHY_RESOURCE_NAME "stmmacphy"
+
+#undef STMMAC_DEBUG
+/*#define STMMAC_DEBUG*/
+#ifdef STMMAC_DEBUG
+#define DBG(nlevel, klevel, fmt, args...) \
+ ((void)(netif_msg_##nlevel(priv) && \
+ printk(KERN_##klevel fmt, ## args)))
+#else
+#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_RX_DEBUG
+/*#define STMMAC_RX_DEBUG*/
+#ifdef STMMAC_RX_DEBUG
+#define RX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define RX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_XMIT_DEBUG
+/*#define STMMAC_XMIT_DEBUG*/
+#ifdef STMMAC_XMIT_DEBUG
+#define TX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define TX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN 9000
+
+/* Module parameters */
+#define TX_TIMEO 5000 /* default 5 seconds */
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");
+
+static int debug = -1; /* -1: default, 0: no output, 16: all */
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, S_IRUGO);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define DMA_TX_SIZE 256
+static int dma_txsize = DMA_TX_SIZE;
+module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
+
+#define DMA_RX_SIZE 256
+static int dma_rxsize = DMA_RX_SIZE;
+module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+
+static int flow_ctrl = FLOW_OFF;
+module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define RX_NO_COALESCE 1 /* Always interrupt on completion */
+#define TX_NO_COALESCE -1 /* No moderation by default */
+
+/* Pay attention to tune this parameter; take care of both
+ * hardware capability and network stability/performance impact.
+ * Many tests showed that ~4ms latency seems to be good enough. */
+#ifdef CONFIG_STMMAC_TIMER
+#define DEFAULT_PERIODIC_RATE 256
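+/* Illustrative arithmetic: at the default rate of 256 Hz the timer period
+ * is 1/256 s ~= 3.9 ms, consistent with the ~4 ms figure quoted above. */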
+static int tmrate = DEFAULT_PERIODIC_RATE;
+module_param(tmrate, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
+#endif
+
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+static int buf_sz = DMA_BUFFER_SIZE;
+module_param(buf_sz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+/* In case of Giga ETH, we can enable/disable the COE for the
+ * transmit HW checksum computation.
+ * Note that, even if tx csum is off in HW, SG is still supported. */
+static int tx_coe = HW_CSUM;
+module_param(tx_coe, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it checks whether any invalid parameter has been passed
+ * to the driver and, if so, replaces it with its default value.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely(dma_rxsize < 0))
+ dma_rxsize = DMA_RX_SIZE;
+ if (unlikely(dma_txsize < 0))
+ dma_txsize = DMA_TX_SIZE;
+ if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+
+ return;
+}
+
+#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
+static void print_pkt(unsigned char *buf, int len)
+{
+ int j;
+ pr_info("len = %d byte, buf addr: 0x%p", len, buf);
+ for (j = 0; j < len; j++) {
+ if ((j % 16) == 0)
+ pr_info("\n %03x:", j);
+ pr_info(" %02x", buf[j]);
+ }
+ pr_info("\n");
+ return;
+}
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size/4)
+
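+/* Reading aid for the helper below: cur_tx and dirty_tx are free-running
+ * counters, so (cur_tx - dirty_tx) is the number of in-flight descriptors.
+ * For example, with dma_tx_size = 256, dirty_tx = 290 and cur_tx = 300 it
+ * returns 290 + 256 - 300 - 1 = 245 free slots; the "- 1" keeps at least
+ * one descriptor unused so the producer never wraps onto an entry that has
+ * not been reclaimed yet. */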
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+{
+ return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+}
+
+/**
+ * stmmac_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void stmmac_adjust_link(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int new_state = 0;
+ unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+
+ if (phydev == NULL)
+ return;
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
+ phydev->addr, phydev->link);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (phydev->link) {
+ u32 ctrl = readl(ioaddr + MAC_CTRL_REG);
+
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ ctrl &= ~priv->mac_type->hw.link.duplex;
+ else
+ ctrl |= priv->mac_type->hw.link.duplex;
+ priv->oldduplex = phydev->duplex;
+ }
+ /* Flow Control operation */
+ if (phydev->pause)
+ priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ if (likely(priv->is_gmac))
+ ctrl &= ~priv->mac_type->hw.link.port;
+ break;
+ case 100:
+ case 10:
+ if (priv->is_gmac) {
+ ctrl |= priv->mac_type->hw.link.port;
+ if (phydev->speed == SPEED_100) {
+ ctrl |=
+ priv->mac_type->hw.link.
+ speed;
+ } else {
+ ctrl &=
+ ~(priv->mac_type->hw.
+ link.speed);
+ }
+ } else {
+ ctrl &= ~priv->mac_type->hw.link.port;
+ }
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
+ break;
+ default:
+ if (netif_msg_link(priv))
+ pr_warning("%s: Speed (%d) is not 10"
+ " or 100!\n", dev->name, phydev->speed);
+ break;
+ }
+
+ priv->speed = phydev->speed;
+ }
+
+ writel(ctrl, ioaddr + MAC_CTRL_REG);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ char phy_id[BUS_ID_SIZE]; /* PHY to connect */
+ char bus_id[BUS_ID_SIZE];
+
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+
+ if (priv->phy_addr == -1) {
+ /* We don't have a PHY, so do nothing */
+ return 0;
+ }
+
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+
+ phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
+ priv->phy_interface);
+
+ if (IS_ERR(phydev)) {
+ pr_err("%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /*
+ * Broken HW is sometimes missing the pull-up resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ * Note: phydev->phy_id is the result of reading the UID PHY registers.
+ */
+ if (phydev->phy_id == 0) {
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+ pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
+ " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static inline void stmmac_mac_enable_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value |= MAC_RNABLE_RX;
+ /* Set the RE (receive enable bit into the MAC CTRL register). */
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_enable_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value |= MAC_ENABLE_TX;
+ /* Set the TE (transmit enable bit into the MAC CTRL register). */
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_disable_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value &= ~MAC_RNABLE_RX;
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+static inline void stmmac_mac_disable_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+ value &= ~MAC_ENABLE_TX;
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+/**
+ * display_ring
+ * @p: pointer to the ring.
+ * @size: size of the ring.
+ * Description: display all the descriptors within the ring.
+ */
+static void display_ring(struct dma_desc *p, int size)
+{
+ struct tmp_s {
+ u64 a;
+ unsigned int b;
+ unsigned int c;
+ };
+ int i;
+ for (i = 0; i < size; i++) {
+ struct tmp_s *x = (struct tmp_s *)(p + i);
+ pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
+ i, (unsigned int)virt_to_phys(&p[i]),
+ (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
+ x->b, x->c);
+ pr_info("\n");
+ }
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static void init_dma_desc_rings(struct net_device *dev)
+{
+ int i;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int bfsize = priv->dma_buf_sz;
+ int buff2_needed = 0;
+ int dis_ic = 0;
+
+#ifdef CONFIG_STMMAC_TIMER
+ /* Using Timers disable interrupts on completion for the reception */
+ dis_ic = 1;
+#endif
+ /* Set the buffer size according to the MTU;
+ * indeed, for jumbo frames we need to bump up the buffer sizes.
+ */
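+ /* Illustrative examples: the default 1500-byte MTU keeps the default
+ * DMA_BUFFER_SIZE (2 KiB), while a 9000-byte jumbo MTU selects
+ * BUF_SIZE_16KiB. */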
+ if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
+ bfsize = BUF_SIZE_16KiB;
+ else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
+ bfsize = BUF_SIZE_8KiB;
+ else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
+ bfsize = BUF_SIZE_4KiB;
+ else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
+ bfsize = BUF_SIZE_2KiB;
+ else
+ bfsize = DMA_BUFFER_SIZE;
+
+ /* If the MTU exceeds 8 KiB, use the second buffer in the chain */
+ if (bfsize >= BUF_SIZE_8KiB)
+ buff2_needed = 1;
+
+ DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
+ txsize, rxsize, bfsize);
+
+ priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
+ priv->rx_skbuff =
+ kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ priv->dma_rx =
+ (struct dma_desc *)dma_alloc_coherent(priv->device,
+ rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
+ GFP_KERNEL);
+ priv->dma_tx =
+ (struct dma_desc *)dma_alloc_coherent(priv->device,
+ txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+
+ if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
+ pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
+ return;
+ }
+
+ DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ dev->name, priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ /* RX INITIALIZATION */
+ DBG(probe, INFO, "stmmac: SKB addresses:\n"
+ "skb\t\tskb data\tdma data\n");
+
+ for (i = 0; i < rxsize; i++) {
+ struct dma_desc *p = priv->dma_rx + i;
+
+ skb = netdev_alloc_skb_ip_align(dev, bfsize);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ break;
+ }
+ priv->rx_skbuff[i] = skb;
+ priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ bfsize, DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[i];
+ if (unlikely(buff2_needed))
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+ DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+ priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+ }
+ priv->cur_rx = 0;
+ priv->dirty_rx = (unsigned int)(i - rxsize);
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
+ /* TX INITIALIZATION */
+ for (i = 0; i < txsize; i++) {
+ priv->tx_skbuff[i] = NULL;
+ priv->dma_tx[i].des2 = 0;
+ }
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+
+ /* Clear the Rx/Tx descriptors */
+ priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+
+ if (netif_msg_hw(priv)) {
+ pr_info("RX descriptor ring:\n");
+ display_ring(priv->dma_rx, rxsize);
+ pr_info("TX descriptor ring:\n");
+ display_ring(priv->dma_tx, txsize);
+ }
+ return;
+}
+
+static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+ }
+ return;
+}
+
+static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ if (priv->tx_skbuff[i] != NULL) {
+ struct dma_desc *p = priv->dma_tx + i;
+ if (p->des2)
+ dma_unmap_single(priv->device, p->des2,
+ priv->mac_type->ops->get_tx_len(p),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+ }
+ return;
+}
+
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ dma_free_rx_skbufs(priv);
+ dma_free_tx_skbufs(priv);
+
+ /* Free the region of consistent memory previously allocated for
+ * the DMA */
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ kfree(priv->rx_skbuff_dma);
+ kfree(priv->rx_skbuff);
+ kfree(priv->tx_skbuff);
+
+ return;
+}
+
+/**
+ * stmmac_dma_start_tx
+ * @ioaddr: device I/O address
+ * Description: this function starts the DMA tx process.
+ */
+static void stmmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+static void stmmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+/**
+ * stmmac_dma_start_rx
+ * @ioaddr: device I/O address
+ * Description: this function starts the DMA rx process.
+ */
+static void stmmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+static void stmmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv : pointer to the private device structure.
+ * Description: it sets the DMA operation mode: tx/rx DMA thresholds
+ * or Store-And-Forward capability. It also verifies the COE for the
+ * transmission in case of Giga ETH.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+ if (!priv->is_gmac) {
+ /* MAC 10/100 */
+ priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->tx_coe = NO_HW_CSUM;
+ } else {
+ if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
+ priv->mac_type->ops->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
+ tc = SF_DMA_MODE;
+ priv->tx_coe = HW_CSUM;
+ } else {
+ /* Checksum computation is performed in software. */
+ priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
+ priv->tx_coe = NO_HW_CSUM;
+ }
+ }
+ tx_coe = priv->tx_coe;
+
+ return;
+}
+
+#ifdef STMMAC_DEBUG
+/**
+ * show_tx_process_state
+ * @status: tx descriptor status field
+ * Description: it shows the Transmit Process State for CSR5[22:20]
+ */
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+/**
+ * show_rx_process_state
+ * @status: rx descriptor status field
+ * Description: it shows the Receive Process State for CSR5[19:17]
+ */
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+/**
+ * stmmac_tx:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void stmmac_tx(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned long ioaddr = priv->dev->base_addr;
+
+ while (priv->dirty_tx != priv->cur_tx) {
+ int last;
+ unsigned int entry = priv->dirty_tx % txsize;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (priv->mac_type->ops->get_tx_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ last = priv->mac_type->ops->get_tx_ls(p);
+ if (likely(last)) {
+ int tx_error =
+ priv->mac_type->ops->tx_status(&priv->dev->stats,
+ &priv->xstats,
+ p, ioaddr);
+ if (likely(tx_error == 0)) {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ } else
+ priv->dev->stats.tx_errors++;
+ }
+ TX_DBG("%s: curr %d, dirty %d\n", __func__,
+ priv->cur_tx, priv->dirty_tx);
+
+ if (likely(p->des2))
+ dma_unmap_single(priv->device, p->des2,
+ priv->mac_type->ops->get_tx_len(p),
+ DMA_TO_DEVICE);
+ if (unlikely(p->des3))
+ p->des3 = 0;
+
+ if (likely(skb != NULL)) {
+ /*
+ * If there is room in the recycle queue (bounded by the
+ * RX ring size) and the skb is the right size, put it
+ * back into the pool; otherwise free it.
+ */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ priv->dma_rx_size) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+
+ priv->tx_skbuff[entry] = NULL;
+ }
+
+ priv->mac_type->ops->release_tx_desc(p);
+
+ entry = (++priv->dirty_tx) % txsize;
+ }
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ TX_DBG("%s: restart transmit\n", __func__);
+ netif_wake_queue(priv->dev);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+ return;
+}
+
+static inline void stmmac_enable_irq(struct stmmac_priv *priv)
+{
+#ifndef CONFIG_STMMAC_TIMER
+ writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+#else
+ priv->tm->timer_start(tmrate);
+#endif
+}
+
+static inline void stmmac_disable_irq(struct stmmac_priv *priv)
+{
+#ifndef CONFIG_STMMAC_TIMER
+ writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+#else
+ priv->tm->timer_stop();
+#endif
+}
+
+static int stmmac_has_work(struct stmmac_priv *priv)
+{
+ unsigned int has_work = 0;
+ int rxret, tx_work = 0;
+
+ rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ (priv->cur_rx % priv->dma_rx_size));
+
+ if (priv->dirty_tx != priv->cur_tx)
+ tx_work = 1;
+
+ if (likely(!rxret || tx_work))
+ has_work = 1;
+
+ return has_work;
+}
+
+static inline void _stmmac_schedule(struct stmmac_priv *priv)
+{
+ if (likely(stmmac_has_work(priv))) {
+ stmmac_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
+}
+
+#ifdef CONFIG_STMMAC_TIMER
+void stmmac_schedule(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->xstats.sched_timer_n++;
+
+ _stmmac_schedule(priv);
+
+ return;
+}
+
+static void stmmac_no_timer_started(unsigned int x)
+{
+}
+
+static void stmmac_no_timer_stopped(void)
+{
+}
+#endif
+
+/**
+ * stmmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv)
+{
+ netif_stop_queue(priv->dev);
+
+ stmmac_dma_stop_tx(priv->dev->base_addr);
+ dma_free_tx_skbufs(priv);
+ priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+ stmmac_dma_start_tx(priv->dev->base_addr);
+
+ priv->dev->stats.tx_errors++;
+ netif_wake_queue(priv->dev);
+
+ return;
+}
+
+/**
+ * stmmac_dma_interrupt - Interrupt handler for the driver
+ * @dev: net device structure
+ * Description: Interrupt handler for the driver (DMA).
+ */
+static void stmmac_dma_interrupt(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+
+#ifdef STMMAC_DEBUG
+ /* It displays the DMA transmit process state (CSR5 register) */
+ if (netif_msg_tx_done(priv))
+ show_tx_process_state(intr_status);
+ if (netif_msg_rx_status(priv))
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(intr, INFO, "transmit underflow\n");
+ if (unlikely(tc != SF_DMA_MODE)
+ && (tc <= 256)) {
+ /* Try to bump up the threshold */
+ tc += 64;
+ priv->mac_type->ops->dma_mode(ioaddr, tc,
+ SF_DMA_MODE);
+ priv->xstats.threshold = tc;
+ }
+ stmmac_tx_err(priv);
+ priv->xstats.tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(intr, INFO, "transmit jabber\n");
+ priv->xstats.tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(intr, INFO, "recv overflow\n");
+ priv->xstats.rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(intr, INFO, "receive buffer unavailable\n");
+ priv->xstats.rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(intr, INFO, "receive process stopped\n");
+ priv->xstats.rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(intr, INFO, "receive watchdog\n");
+ priv->xstats.rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(intr, INFO, "transmit early interrupt\n");
+ priv->xstats.tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(intr, INFO, "transmit process stopped\n");
+ priv->xstats.tx_process_stopped_irq++;
+ stmmac_tx_err(priv);
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(intr, INFO, "fatal bus error\n");
+ priv->xstats.fatal_bus_error_irq++;
+ stmmac_tx_err(priv);
+ }
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ priv->xstats.normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ _stmmac_schedule(priv);
+ }
+
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(intr, INFO, "\n\n");
+
+ return;
+}
+
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int ret;
+
+ /* Check that the MAC address is valid. If it is not, assign a
+ * random one and warn the user; a specific address can still be
+ * set from user space, e.g.:
+ * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ pr_warning("%s: generated random MAC address %pM\n", dev->name,
+ dev->dev_addr);
+ }
+
+ stmmac_verify_args();
+
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ return ret;
+ }
+
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, &stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ return ret;
+ }
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm = kmalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
+ if (unlikely(priv->tm == NULL)) {
+ pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ priv->tm->freq = tmrate;
+
+ /* Test whether the HW timer can actually be used;
+ * in case of failure, continue without the timer. */
+ if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
+ pr_warning("stmmaceth: cannot attach the HW timer\n");
+ tmrate = 0;
+ priv->tm->freq = 0;
+ priv->tm->timer_start = stmmac_no_timer_started;
+ priv->tm->timer_stop = stmmac_no_timer_stopped;
+ }
+#endif
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+ priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+ priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ init_dma_desc_rings(dev);
+
+ /* DMA initialization and SW reset */
+ if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
+ priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+
+ pr_err("%s: DMA initialization failed\n", __func__);
+ return -1;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* Initialize the MAC Core */
+ priv->mac_type->ops->core_init(ioaddr);
+
+ priv->shutdown = 0;
+
+ /* Initialise the MMC (if present) to disable all interrupts. */
+ writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK);
+ writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK);
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_enable_rx(ioaddr);
+ stmmac_mac_enable_tx(ioaddr);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
+ /* Start the ball rolling... */
+ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+ stmmac_dma_start_tx(ioaddr);
+ stmmac_dma_start_rx(ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_start(tmrate);
+#endif
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->mac_type->ops->dump_mac_regs(ioaddr);
+ priv->mac_type->ops->dump_dma_regs(ioaddr);
+ }
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ napi_enable(&priv->napi);
+ skb_queue_head_init(&priv->rx_recycle);
+ netif_start_queue(dev);
+ return 0;
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+#ifdef CONFIG_STMMAC_TIMER
+ /* Stop and release the timer */
+ stmmac_close_ext_timer();
+ kfree(priv->tm);
+#endif
+ napi_disable(&priv->napi);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Free the IRQ lines */
+ free_irq(dev->irq, dev);
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ stmmac_dma_stop_tx(dev->base_addr);
+ stmmac_dma_stop_rx(dev->base_addr);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC core */
+ stmmac_mac_disable_tx(dev->base_addr);
+ stmmac_mac_disable_rx(dev->base_addr);
+
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+/*
+ * Perform software (emulated) TSO segmentation on the skb and transmit
+ * each resulting segment.
+ */
+static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
+{
+ struct sk_buff *segs, *curr_skb;
+ int gso_segs = skb_shinfo(skb)->gso_segs;
+
+ /* Estimate the number of fragments in the worst case */
+ if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
+ netif_stop_queue(priv->dev);
+ TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
+ __func__);
+ if (stmmac_tx_avail(priv) < gso_segs)
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(priv->dev);
+ }
+ TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
+ skb, skb->len);
+
+ segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
+ if (unlikely(IS_ERR(segs)))
+ goto sw_tso_end;
+
+ do {
+ curr_skb = segs;
+ segs = segs->next;
+ TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
+ "*next %p\n", curr_skb->len, curr_skb, segs);
+ curr_skb->next = NULL;
+ stmmac_xmit(curr_skb, priv->dev);
+ } while (segs);
+
+sw_tso_end:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
+ struct net_device *dev,
+ int csum_insertion)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+
+ if (nopaged_len > BUF_SIZE_8KiB) {
+
+ int buf2_size = nopaged_len - BUF_SIZE_8KiB;
+
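+ /* Map the first 8KiB of the linear data: des2 holds the DMA
+ * address and des3 points 4KiB further into the same mapping,
+ * acting as the descriptor's second buffer pointer. */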
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ BUF_SIZE_8KiB, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ desc->des2 = dma_map_single(priv->device,
+ skb->data + BUF_SIZE_8KiB,
+ buf2_size, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->mac_type->ops->prepare_tx_desc(desc, 0,
+ buf2_size, csum_insertion);
+ priv->mac_type->ops->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ } else {
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+ priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
+ }
+ return entry;
+}
+
+/**
+ * stmmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry;
+ int i, csum_insertion = 0;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct dma_desc *desc, *first;
+
+ if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: BUG! Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = priv->cur_tx % txsize;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((skb->len > ETH_FRAME_LEN) || nfrags)
+ pr_info("stmmac xmit:\n"
+ "\tskb addr %p - len: %d - nopaged_len: %d\n"
+ "\tn_frags: %d - ip_summed: %d - %s gso\n",
+ skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+ !skb_is_gso(skb) ? "isn't" : "is");
+#endif
+
+ if (unlikely(skb_is_gso(skb)))
+ return stmmac_sw_tso(priv, skb);
+
+ if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
+ if (likely(priv->tx_coe == NO_HW_CSUM))
+ skb_checksum_help(skb);
+ else
+ csum_insertion = 1;
+ }
+
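+ /* Remember the first descriptor of the frame: its owner bit is
+ * set last so the DMA never sees a partially built chain. */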
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+ pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
+ "\t\tn_frags: %d, ip_summed: %d\n",
+ skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+#endif
+ priv->tx_skbuff[entry] = skb;
+ if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
+ entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+ desc = priv->dma_tx + entry;
+ } else {
+ unsigned int nopaged_len = skb_headlen(skb);
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
+ }
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = frag->size;
+
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+ desc->des2 = dma_map_page(priv->device, frag->page,
+ frag->page_offset,
+ len, DMA_TO_DEVICE);
+ priv->tx_skbuff[entry] = NULL;
+ priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
+ csum_insertion);
+ priv->mac_type->ops->set_tx_owner(desc);
+ }
+
+ /* Interrupt on completion only for the last segment */
+ priv->mac_type->ops->close_tx_desc(desc);
+#ifdef CONFIG_STMMAC_TIMER
+ /* Clean IC while using timers */
+ priv->mac_type->ops->clear_tx_ic(desc);
+#endif
+ /* To avoid a race condition, hand the first descriptor to the DMA last */
+ priv->mac_type->ops->set_tx_owner(first);
+
+ priv->cur_tx++;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
+ "first=%p, nfrags=%d\n",
+ (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
+ entry, first, nfrags);
+ display_ring(priv->dma_tx, txsize);
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+#endif
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ TX_DBG("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* CSR1 enables the transmit DMA to check for new descriptor */
+ writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+
+ return NETDEV_TX_OK;
+}
+
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ int bfsize = priv->dma_buf_sz;
+ struct dma_desc *p = priv->dma_rx;
+
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ unsigned int entry = priv->dirty_rx % rxsize;
+ if (likely(priv->rx_skbuff[entry] == NULL)) {
+ struct sk_buff *skb;
+
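+ /* Reuse an skb from the recycle queue when possible,
+ * otherwise allocate a fresh one. */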
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb_ip_align(priv->dev,
+ bfsize);
+
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ priv->rx_skbuff_dma[entry] =
+ dma_map_single(priv->device, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ (p + entry)->des2 = priv->rx_skbuff_dma[entry];
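+ /* With large buffers on the GMAC, des3 points 8KiB into the
+ * same mapping and acts as the second buffer pointer. */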
+ if (unlikely(priv->is_gmac)) {
+ if (bfsize >= BUF_SIZE_8KiB)
+ (p + entry)->des3 =
+ (p + entry)->des2 + BUF_SIZE_8KiB;
+ }
+ RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+ }
+ priv->mac_type->ops->set_rx_owner(p + entry);
+ }
+ return;
+}
+
+static int stmmac_rx(struct stmmac_priv *priv, int limit)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int entry = priv->cur_rx % rxsize;
+ unsigned int next_entry;
+ unsigned int count = 0;
+ struct dma_desc *p = priv->dma_rx + entry;
+ struct dma_desc *p_next;
+
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_hw(priv)) {
+ pr_debug(">>> stmmac_rx: descriptor ring:\n");
+ display_ring(priv->dma_rx, rxsize);
+ }
+#endif
+ count = 0;
+ while (!priv->mac_type->ops->get_rx_owner(p)) {
+ int status;
+
+ if (count >= limit)
+ break;
+
+ count++;
+
+ next_entry = (++priv->cur_rx) % rxsize;
+ p_next = priv->dma_rx + next_entry;
+ prefetch(p_next);
+
+ /* read the status of the incoming frame */
+ status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
+ if (unlikely(status == discard_frame))
+ priv->dev->stats.rx_errors++;
+ else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+ int frame_len =
+ priv->mac_type->ops->get_rx_frame_len(p) - 4;
+
+#ifdef STMMAC_RX_DEBUG
+ if (frame_len > ETH_FRAME_LEN)
+ pr_debug("\tRX frame size %d, COE status: %d\n",
+ frame_len, status);
+
+ if (netif_msg_hw(priv))
+ pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ p, entry, p->des2);
+#endif
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ pr_err("%s: Inconsistent Rx descriptor chain\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rx_skbuff[entry] = NULL;
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info(" frame received (%dbytes)", frame_len);
+ print_pkt(skb->data, frame_len);
+ }
+#endif
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(status == csum_none)) {
+ /* always the case for the old MAC 10/100 */
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_receive_skb(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += frame_len;
+ priv->dev->last_rx = jiffies;
+ }
+ entry = next_entry;
+ p = p_next; /* use prefetched values */
+ }
+
+ stmmac_rx_refill(priv);
+
+ priv->xstats.rx_pkt_n += count;
+
+ return count;
+}
+
+/**
+ * stmmac_poll - stmmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * It also runs the TX completion process.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+ struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+ int work_done = 0;
+
+ priv->xstats.poll_n++;
+ stmmac_tx(priv);
+ work_done = stmmac_rx(priv, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ stmmac_enable_irq(priv);
+ }
+ return work_done;
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ stmmac_tx_err(priv);
+ return;
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr) {
+ pr_warning("%s: can't change I/O address\n", dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow changing the IRQ */
+ if (map->irq != dev->irq) {
+ pr_warning("%s: can't change IRQ number %d\n",
+ dev->name, dev->irq);
+ return -EOPNOTSUPP;
+ }
+
+ /* ignore other fields */
+ return 0;
+}
+
+/**
+ * stmmac_multicast_list - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_multicast_list(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ priv->mac_type->ops->set_filter(dev);
+ spin_unlock(&priv->lock);
+ return;
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate negative error code (as defined in
+ * errno.h) on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int max_mtu;
+
+ if (netif_running(dev)) {
+ pr_err("%s: must be stopped to change its MTU\n", dev->name);
+ return -EBUSY;
+ }
+
+ if (priv->is_gmac)
+ max_mtu = JUMBO_LEN;
+ else
+ max_mtu = ETH_DATA_LEN;
+
+ if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+ pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ pr_err("%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ if (priv->is_gmac) {
+ unsigned long ioaddr = dev->base_addr;
+ /* To handle GMAC own interrupts */
+ priv->mac_type->ops->host_irq_status(ioaddr);
+ }
+ stmmac_dma_interrupt(dev);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently no special functionality is supported in IOCTL; only
+ * phy_mii_ioctl(...) can be invoked.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+
+ spin_lock(&priv->lock);
+ ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+ spin_unlock(&priv->lock);
+ default:
+ break;
+ }
+ return ret;
+}
+
+#ifdef STMMAC_VLAN_TAG_USED
+static void stmmac_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp);
+
+ spin_lock(&priv->lock);
+ priv->vlgrp = grp;
+ spin_unlock(&priv->lock);
+
+ return;
+}
+#endif
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_set_multicast_list = stmmac_multicast_list,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_set_config = stmmac_config,
+#ifdef STMMAC_VLAN_TAG_USED
+ .ndo_vlan_rx_register = stmmac_vlan_rx_register,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_probe - Initialization of the adapter.
+ * @dev : device pointer
+ * Description: The function initializes the network device structure for
+ * the STMMAC driver. It also calls the low level routines
+ * in order to init the HW (i.e. the DMA engine)
+ */
+static int stmmac_probe(struct net_device *dev)
+{
+ int ret = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ ether_setup(dev);
+
+ dev->netdev_ops = &stmmac_netdev_ops;
+ stmmac_set_ethtool_ops(dev);
+
+ dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+ dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ dev->features |= NETIF_F_HW_VLAN_RX;
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ if (priv->is_gmac)
+ priv->rx_csum = 1;
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ priv->pause = pause;
+ netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
+
+ /* Get the MAC address */
+ priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+
+ if (!is_valid_ether_addr(dev->dev_addr))
+ pr_warning("\tno valid MAC address;"
+ "please, use ifconfig or nwhwconfig!\n");
+
+ ret = register_netdev(dev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n",
+ __func__, ret);
+ return -ENODEV;
+ }
+
+ DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
+ dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
+ (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
+
+ spin_lock_init(&priv->lock);
+
+ return ret;
+}
+
+/**
+ * stmmac_mac_device_setup
+ * @dev : device pointer
+ * Description: select and initialise the mac device (mac100 or Gmac).
+ */
+static int stmmac_mac_device_setup(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ struct mac_device_info *device;
+
+ if (priv->is_gmac)
+ device = gmac_setup(ioaddr);
+ else
+ device = mac100_setup(ioaddr);
+
+ if (!device)
+ return -ENOMEM;
+
+ priv->mac_type = device;
+
+ priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ if (priv->wolenabled == PMT_SUPPORTED)
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame */
+
+ return 0;
+}
+
+static int stmmacphy_dvr_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacphy_data *plat_dat;
+ plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+
+ pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
+ plat_dat->bus_id);
+
+ return 0;
+}
+
+static int stmmacphy_dvr_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver stmmacphy_driver = {
+ .driver = {
+ .name = PHY_RESOURCE_NAME,
+ },
+ .probe = stmmacphy_dvr_probe,
+ .remove = stmmacphy_dvr_remove,
+};
+
+/**
+ * stmmac_associate_phy
+ * @dev: pointer to device structure
+ * @data: points to the private structure.
+ * Description: Scans through all the PHYs we have registered and checks if
+ * any are associated with our MAC. If so, then just fill in
+ * the blanks in our local context structure
+ */
+static int stmmac_associate_phy(struct device *dev, void *data)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ struct plat_stmmacphy_data *plat_dat;
+
+ plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+
+ DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
+ plat_dat->bus_id);
+
+ /* Check that this phy is for the MAC being initialised */
+ if (priv->bus_id != plat_dat->bus_id)
+ return 0;
+
+ /* OK, this PHY is connected to the MAC.
+ Go ahead and get the parameters */
+ DBG(probe, DEBUG, "%s: OK. Found PHY config\n", __func__);
+ priv->phy_irq =
+ platform_get_irq_byname(to_platform_device(dev), "phyirq");
+ DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__,
+ plat_dat->bus_id, priv->phy_irq);
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ plat_dat->phy_addr = phyaddr;
+
+ priv->phy_addr = plat_dat->phy_addr;
+ priv->phy_mask = plat_dat->phy_mask;
+ priv->phy_interface = plat_dat->interface;
+ priv->phy_reset = plat_dat->phy_reset;
+
+ DBG(probe, DEBUG, "%s: exiting\n", __func__);
+ return 1; /* forces exit of driver_for_each_device() */
+}
+
+/**
+ * stmmac_dvr_probe
+ * @pdev: platform device pointer
+ * Description: the driver is initialized through platform_device.
+ */
+static int stmmac_dvr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ unsigned int *addr = NULL;
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+ struct plat_stmmacenet_data *plat_dat;
+
+ pr_info("STMMAC driver:\n\tplatform registration... ");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ goto out;
+ }
+ pr_info("done!\n");
+
+ if (!request_mem_region(res->start, (res->end - res->start),
+ pdev->name)) {
+ pr_err("%s: ERROR: memory allocation failed"
+ "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ addr = ioremap(res->start, (res->end - res->start));
+ if (!addr) {
+ pr_err("%s: ERROR: memory mapping failed \n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev) {
+ pr_err("%s: ERROR: allocating the device\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ /* Get the MAC information */
+ ndev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (ndev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->device = &(pdev->dev);
+ priv->dev = ndev;
+ plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ priv->bus_id = plat_dat->bus_id;
+ priv->pbl = plat_dat->pbl; /* TLI */
+ priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
+
+ platform_set_drvdata(pdev, ndev);
+
+ /* Set the I/O base addr */
+ ndev->base_addr = (unsigned long)addr;
+
+ /* MAC HW device detection */
+ ret = stmmac_mac_device_setup(ndev);
+ if (ret < 0)
+ goto out;
+
+ /* Network Device Registration */
+ ret = stmmac_probe(ndev);
+ if (ret < 0)
+ goto out;
+
+ /* associate a PHY - it is provided by another platform bus */
+ if (!driver_for_each_device
+ (&(stmmacphy_driver.driver), NULL, (void *)priv,
+ stmmac_associate_phy)) {
+ pr_err("No PHY device is associated with this MAC!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bsp_priv = plat_dat->bsp_priv;
+
+ pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
+ "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name,
+ pdev->id, ndev->irq, (unsigned int)addr);
+
+ /* MDIO bus Registration */
+ pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0)
+ goto out;
+ pr_debug("registered!\n");
+
+out:
+ if (ret < 0) {
+ platform_set_drvdata(pdev, NULL);
+ release_mem_region(res->start, (res->end - res->start));
+ if (addr != NULL)
+ iounmap(addr);
+ }
+
+ return ret;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @pdev: platform device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status, releases the DMA descriptor rings,
+ * unregisters the MDIO bus and unmaps the allocated memory.
+ */
+static int stmmac_dvr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ pr_info("%s:\n\tremoving driver", __func__);
+
+ stmmac_dma_stop_rx(ndev->base_addr);
+ stmmac_dma_stop_tx(ndev->base_addr);
+
+ stmmac_mac_disable_rx(ndev->base_addr);
+ stmmac_mac_disable_tx(ndev->base_addr);
+
+ netif_carrier_off(ndev);
+
+ stmmac_mdio_unregister(ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+
+ iounmap((void *)ndev->base_addr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, (res->end - res->start));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int dis_ic = 0;
+
+ if (!dev || !netif_running(dev))
+ return 0;
+
+ spin_lock(&priv->lock);
+
+ if (state.event == PM_EVENT_SUSPEND) {
+ netif_device_detach(dev);
+ netif_stop_queue(dev);
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_stop();
+ dis_ic = 1;
+#endif
+ napi_disable(&priv->napi);
+
+ /* Stop TX/RX DMA */
+ stmmac_dma_stop_tx(dev->base_addr);
+ stmmac_dma_stop_rx(dev->base_addr);
+ /* Clear the Rx/Tx descriptors */
+ priv->mac_type->ops->init_rx_desc(priv->dma_rx,
+ priv->dma_rx_size, dis_ic);
+ priv->mac_type->ops->init_tx_desc(priv->dma_tx,
+ priv->dma_tx_size);
+
+ stmmac_mac_disable_tx(dev->base_addr);
+
+ if (device_may_wakeup(&(pdev->dev))) {
+ /* Enable Power down mode by programming the PMT regs */
+ if (priv->wolenabled == PMT_SUPPORTED)
+ priv->mac_type->ops->pmt(dev->base_addr,
+ priv->wolopts);
+ } else {
+ stmmac_mac_disable_rx(dev->base_addr);
+ }
+ } else {
+ priv->shutdown = 1;
+ /* Although this can appear slightly redundant, it actually
+ * speeds up the standby operation and guarantees that the
+ * driver keeps working if hibernating to media. */
+ stmmac_release(dev);
+ }
+
+ spin_unlock(&priv->lock);
+ return 0;
+}
+
+static int stmmac_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ if (!netif_running(dev))
+ return 0;
+
+ spin_lock(&priv->lock);
+
+ if (priv->shutdown) {
+ /* Re-open the interface and re-init the MAC/DMA
+ and the rings. */
+ stmmac_open(dev);
+ goto out_resume;
+ }
+
+ /* The Power Down bit in the PM register is cleared
+ * automatically as soon as a magic packet or a Wake-up frame
+ * is received. Nevertheless, it's better to clear this bit
+ * manually because it can cause problems while resuming
+ * from other devices (e.g. serial console). */
+ if (device_may_wakeup(&(pdev->dev)))
+ if (priv->wolenabled == PMT_SUPPORTED)
+ priv->mac_type->ops->pmt(dev->base_addr, 0);
+
+ netif_device_attach(dev);
+
+ /* Enable the MAC and DMA */
+ stmmac_mac_enable_rx(ioaddr);
+ stmmac_mac_enable_tx(ioaddr);
+ stmmac_dma_start_tx(ioaddr);
+ stmmac_dma_start_rx(ioaddr);
+
+#ifdef CONFIG_STMMAC_TIMER
+ priv->tm->timer_start(tmrate);
+#endif
+ napi_enable(&priv->napi);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ netif_start_queue(dev);
+
+out_resume:
+ spin_unlock(&priv->lock);
+ return 0;
+}
+#endif
+
+static struct platform_driver stmmac_driver = {
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ },
+ .probe = stmmac_dvr_probe,
+ .remove = stmmac_dvr_remove,
+#ifdef CONFIG_PM
+ .suspend = stmmac_suspend,
+ .resume = stmmac_resume,
+#endif
+
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ int ret;
+
+ if (platform_driver_register(&stmmacphy_driver)) {
+ pr_err("No PHY devices registered!\n");
+ return -ENODEV;
+ }
+
+ ret = platform_driver_register(&stmmac_driver);
+ return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ platform_driver_unregister(&stmmacphy_driver);
+ platform_driver_unregister(&stmmac_driver);
+}
+
+#ifndef MODULE
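+/* Parse the "stmmaceth=" kernel command line options, e.g.
+ * stmmaceth=debug:16,phyaddr:1,dma_txsize:256 (illustrative values). */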
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6))
+ strict_strtoul(opt + 6, 0, (unsigned long *)&debug);
+ else if (!strncmp(opt, "phyaddr:", 8))
+ strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr);
+ else if (!strncmp(opt, "dma_txsize:", 11))
+ strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_txsize);
+ else if (!strncmp(opt, "dma_rxsize:", 11))
+ strict_strtoul(opt + 11, 0,
+ (unsigned long *)&dma_rxsize);
+ else if (!strncmp(opt, "buf_sz:", 7))
+ strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
+ else if (!strncmp(opt, "tc:", 3))
+ strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
+ else if (!strncmp(opt, "tx_coe:", 7))
+ strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
+ else if (!strncmp(opt, "watchdog:", 9))
+ strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
+ else if (!strncmp(opt, "flow_ctrl:", 10))
+ strict_strtoul(opt + 10, 0,
+ (unsigned long *)&flow_ctrl);
+ else if (!strncmp(opt, "pause:", 6))
+ strict_strtoul(opt + 6, 0, (unsigned long *)&pause);
+#ifdef CONFIG_STMMAC_TIMER
+ else if (!strncmp(opt, "tmrate:", 7))
+ strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate);
+#endif
+ }
+ return 0;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
new file mode 100644
index 00000000000..8498552a22f
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -0,0 +1,217 @@
+/*******************************************************************************
+ STMMAC Ethernet Driver -- MDIO bus implementation
+ Provides Bus interface for MII registers
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Carl Shaw <carl.shaw@st.com>
+ Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * Description: it reads data from an MII register of the PHY device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long ioaddr = ndev->base_addr;
+ unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_data = priv->mac_type->hw.mii.data;
+
+ int data;
+ u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+ ((phyreg << 6) & (0x000007C0)));
+ regValue |= MII_BUSY; /* in case of GMAC */
+
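+ /* Wait for the MII interface to become idle, issue the read,
+ * then wait for the operation to complete. */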
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+ writel(regValue, ioaddr + mii_address);
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Read the data from the MII data register */
+ data = (int)readl(ioaddr + mii_data);
+
+ return data;
+}
+
+/**
+ * stmmac_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * @phydata: phy data
+ * Description: it writes data into an MII register of the PHY device.
+ */
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long ioaddr = ndev->base_addr;
+ unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_data = priv->mac_type->hw.mii.data;
+
+ u16 value =
+ (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
+ | MII_WRITE;
+
+ value |= MII_BUSY;
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ /* Set the MII address register to write */
+ writel(phydata, ioaddr + mii_data);
+ writel(value, ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int stmmac_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned long ioaddr = ndev->base_addr;
+ unsigned int mii_address = priv->mac_type->hw.mii.addr;
+
+ if (priv->phy_reset) {
+ pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+ priv->phy_reset(priv->bsp_priv);
+ }
+
+ /* This is a workaround for problems with the STE101P PHY.
+ * It doesn't complete its reset until at least one clock cycle
+ * on MDC, so perform a dummy mdio read.
+ */
+ writel(0, ioaddr + mii_address);
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+ int err = 0;
+ struct mii_bus *new_bus;
+ int *irqlist;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int addr, found;
+
+ new_bus = mdiobus_alloc();
+ if (new_bus == NULL)
+ return -ENOMEM;
+
+ irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (irqlist == NULL) {
+ err = -ENOMEM;
+ goto irqlist_alloc_fail;
+ }
+
+ /* Assign IRQ to phy at address phy_addr */
+ if (priv->phy_addr != -1)
+ irqlist[priv->phy_addr] = priv->phy_irq;
+
+ new_bus->name = "STMMAC MII Bus";
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ new_bus->reset = &stmmac_mdio_reset;
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+ new_bus->priv = ndev;
+ new_bus->irq = irqlist;
+ new_bus->phy_mask = priv->phy_mask;
+ new_bus->parent = priv->device;
+ err = mdiobus_register(new_bus);
+ if (err != 0) {
+ pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+ goto bus_register_fail;
+ }
+
+ priv->mii = new_bus;
+
+ found = 0;
+ for (addr = 0; addr < 32; addr++) {
+ struct phy_device *phydev = new_bus->phy_map[addr];
+ if (phydev) {
+ if (priv->phy_addr == -1) {
+ priv->phy_addr = addr;
+ phydev->irq = priv->phy_irq;
+ irqlist[addr] = priv->phy_irq;
+ }
+ pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n",
+ ndev->name, phydev->phy_id, addr,
+ phydev->irq, dev_name(&phydev->dev),
+ (addr == priv->phy_addr) ? " active" : "");
+ found = 1;
+ }
+ }
+
+ if (!found)
+ pr_warning("%s: No PHY found\n", ndev->name);
+
+ return 0;
+bus_register_fail:
+ kfree(irqlist);
+irqlist_alloc_fail:
+ mdiobus_free(new_bus);
+ return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ mdiobus_free(priv->mii);
+
+ return 0;
+}
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
new file mode 100644
index 00000000000..b838c658207
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -0,0 +1,140 @@
+/*******************************************************************************
+ STMMAC external timer support.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+#include "stmmac_timer.h"
+
+static void stmmac_timer_handler(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ stmmac_schedule(dev);
+
+ return;
+}
+
+#define STMMAC_TIMER_MSG(timer, freq) \
+printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
+
+#if defined(CONFIG_STMMAC_RTC_TIMER)
+#include <linux/rtc.h>
+static struct rtc_device *stmmac_rtc;
+static rtc_task_t stmmac_task;
+
+static void stmmac_rtc_start(unsigned int new_freq)
+{
+ rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
+ return;
+}
+
+static void stmmac_rtc_stop(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+ return;
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ stmmac_task.private_data = dev;
+ stmmac_task.func = stmmac_timer_handler;
+
+ stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (stmmac_rtc == NULL) {
+ pr_error("open rtc device failed\n");
+ return -ENODEV;
+ }
+
+ rtc_irq_register(stmmac_rtc, &stmmac_task);
+
+ /* Bail out if the RTC cannot generate periodic interrupts */
+ if (rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0) {
+ pr_err("set periodic failed\n");
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
+
+ tm->timer_start = stmmac_rtc_start;
+ tm->timer_stop = stmmac_rtc_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
+ rtc_irq_unregister(stmmac_rtc, &stmmac_task);
+ rtc_class_close(stmmac_rtc);
+ return 0;
+}
+
+#elif defined(CONFIG_STMMAC_TMU_TIMER)
+#include <linux/clk.h>
+#define TMU_CHANNEL "tmu2_clk"
+static struct clk *timer_clock;
+
+static void stmmac_tmu_start(unsigned int new_freq)
+{
+ clk_set_rate(timer_clock, new_freq);
+ clk_enable(timer_clock);
+ return;
+}
+
+static void stmmac_tmu_stop(void)
+{
+ clk_disable(timer_clock);
+ return;
+}
+
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
+{
+ timer_clock = clk_get(NULL, TMU_CHANNEL);
+
+ if (timer_clock == NULL)
+ return -1;
+
+ if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
+ timer_clock = NULL;
+ return -1;
+ }
+
+ STMMAC_TIMER_MSG("TMU2", tm->freq);
+ tm->timer_start = stmmac_tmu_start;
+ tm->timer_stop = stmmac_tmu_stop;
+
+ return 0;
+}
+
+int stmmac_close_ext_timer(void)
+{
+ clk_disable(timer_clock);
+ tmu2_unregister_user();
+ clk_put(timer_clock);
+ return 0;
+}
+#endif
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
new file mode 100644
index 00000000000..f795cae3372
--- /dev/null
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+ STMMAC external timer Header File.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+struct stmmac_timer {
+ void (*timer_start) (unsigned int new_freq);
+ void (*timer_stop) (void);
+ unsigned int freq;
+};
+
+/* Open the HW timer device and return 0 in case of success */
+int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
+/* Stop the timer and release it */
+int stmmac_close_ext_timer(void);
+/* Function used for scheduling task within the stmmac */
+void stmmac_schedule(struct net_device *dev);
+
+#if defined(CONFIG_STMMAC_TMU_TIMER)
+extern int tmu2_register_user(void *fnt, void *data);
+extern void tmu2_unregister_user(void);
+#endif
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 305ec3d783d..7019a0d1a82 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index f1e5e4542c2..bc74db0d12f 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1016,7 +1016,6 @@ static const struct net_device_ops vnet_ops = {
.ndo_open = vnet_open,
.ndo_stop = vnet_close,
.ndo_set_multicast_list = vnet_set_rx_mode,
- .ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = vnet_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = vnet_tx_timeout,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f09bc5dfe8b..ba5d3fe753b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -902,11 +902,12 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
struct tg3 *tp = bp->priv;
u32 val;
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
- return -EAGAIN;
+ spin_lock_bh(&tp->lock);
if (tg3_readphy(tp, reg, &val))
- return -EIO;
+ val = -EIO;
+
+ spin_unlock_bh(&tp->lock);
return val;
}
@@ -914,14 +915,16 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
struct tg3 *tp = bp->priv;
+ u32 ret = 0;
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
- return -EAGAIN;
+ spin_lock_bh(&tp->lock);
if (tg3_writephy(tp, reg, val))
- return -EIO;
+ ret = -EIO;
- return 0;
+ spin_unlock_bh(&tp->lock);
+
+ return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
@@ -1011,12 +1014,6 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
static void tg3_mdio_start(struct tg3 *tp)
{
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
- mutex_lock(&tp->mdio_bus->mdio_lock);
- tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
- mutex_unlock(&tp->mdio_bus->mdio_lock);
- }
-
tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
@@ -1041,15 +1038,6 @@ static void tg3_mdio_start(struct tg3 *tp)
tg3_mdio_config_5785(tp);
}
-static void tg3_mdio_stop(struct tg3 *tp)
-{
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
- mutex_lock(&tp->mdio_bus->mdio_lock);
- tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
- mutex_unlock(&tp->mdio_bus->mdio_lock);
- }
-}
-
static int tg3_mdio_init(struct tg3 *tp)
{
int i;
@@ -1141,7 +1129,6 @@ static void tg3_mdio_fini(struct tg3 *tp)
tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
- tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
}
}
@@ -1363,7 +1350,7 @@ static void tg3_adjust_link(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
- spin_lock(&tp->lock);
+ spin_lock_bh(&tp->lock);
mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
MAC_MODE_HALF_DUPLEX);
@@ -1431,7 +1418,7 @@ static void tg3_adjust_link(struct net_device *dev)
tp->link_config.active_speed = phydev->speed;
tp->link_config.active_duplex = phydev->duplex;
- spin_unlock(&tp->lock);
+ spin_unlock_bh(&tp->lock);
if (linkmesg)
tg3_link_report(tp);
@@ -6392,8 +6379,6 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_nvram_lock(tp);
- tg3_mdio_stop(tp);
-
tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
/* No matching tg3_nvram_unlock() after this because
@@ -8698,6 +8683,8 @@ static int tg3_close(struct net_device *dev)
del_timer_sync(&tp->timer);
+ tg3_phy_stop(tp);
+
tg3_full_lock(tp, 1);
#if 0
tg3_dump_state(tp);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 82b45d8797b..bab7940158e 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2412,7 +2412,6 @@ struct ring_info {
struct tx_ring_info {
struct sk_buff *skb;
- u32 prev_vlan_tag;
};
struct tg3_config_info {
@@ -2749,7 +2748,6 @@ struct tg3 {
#define TG3_FLG3_5701_DMA_BUG 0x00000008
#define TG3_FLG3_USE_PHYLIB 0x00000010
#define TG3_FLG3_MDIOBUS_INITED 0x00000020
-#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040
#define TG3_FLG3_PHY_CONNECTED 0x00000080
#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 525bbc5b9c9..36cb2423bcf 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -108,6 +108,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
#define IBMTR_DEBUG_MESSAGES 0
#include <linux/module.h>
+#include <linux/sched.h>
#ifdef PCMCIA /* required for ibmtr_cs.c to build */
#undef MODULE /* yes, really */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index d3ee1994b02..4fdfa2ae541 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -946,8 +946,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
char *name;
unsigned long flags = 0;
- err = -EINVAL;
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = security_tun_dev_create();
@@ -964,7 +962,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
flags |= TUN_TAP_DEV;
name = "tap%d";
} else
- goto failed;
+ return -EINVAL;
if (*ifr->ifr_name)
name = ifr->ifr_name;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index d6d345229fe..5921f5bdd76 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -108,6 +108,7 @@ static const int multicast_filter_limit = 32;
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e2a39b9be96..e391ef969c2 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -263,6 +263,7 @@ static int kaweth_control(struct kaweth_device *kaweth,
int timeout)
{
struct usb_ctrlrequest *dr;
+ int retval;
dbg("kaweth_control()");
@@ -278,18 +279,21 @@ static int kaweth_control(struct kaweth_device *kaweth,
return -ENOMEM;
}
- dr->bRequestType= requesttype;
+ dr->bRequestType = requesttype;
dr->bRequest = request;
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(size);
- return kaweth_internal_control_msg(kaweth->dev,
- pipe,
- dr,
- data,
- size,
- timeout);
+ retval = kaweth_internal_control_msg(kaweth->dev,
+ pipe,
+ dr,
+ data,
+ size,
+ timeout);
+
+ kfree(dr);
+ return retval;
}
/****************************************************************
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6fdaba8674b..ed4a508ef26 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -62,8 +62,11 @@ static char *devid=NULL;
static struct usb_eth_dev usb_dev_id[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
{.name = pn, .vendor = vid, .device = pid, .private = flags},
+#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
+ PEGASUS_DEV(pn, vid, pid, flags)
#include "pegasus.h"
#undef PEGASUS_DEV
+#undef PEGASUS_DEV_CLASS
{NULL, 0, 0, 0},
{NULL, 0, 0, 0}
};
@@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = {
static struct usb_device_id pegasus_ids[] = {
#define PEGASUS_DEV(pn, vid, pid, flags) \
{.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid},
+/*
+ * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product
+ * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to
+ * ignore adaptors belonging to the "Wireless" class 0xE0. For this one
+ * case anyway, seeing as the pegasus is for "Wired" adaptors.
+ */
+#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \
+ {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \
+ .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass},
#include "pegasus.h"
#undef PEGASUS_DEV
+#undef PEGASUS_DEV_CLASS
{},
{}
};
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index f968c834ff6..5d02f020073 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100,
DEFAULT_GPIO_RESET | PEGASUS_II )
-PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121,
+/*
+ * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors
+ * with the same product IDs by checking the device class too.
+ */
+PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00,
DEFAULT_GPIO_RESET | PEGASUS_II )
PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986,
DEFAULT_GPIO_RESET )
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index d032bba9bc4..0caa8008c51 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -418,6 +418,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
goto halt_fail_and_release;
}
memcpy(net->dev_addr, bp, ETH_ALEN);
+ memcpy(net->perm_addr, bp, ETH_ALEN);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 938fb3530a7..c6c922247d0 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1227,7 +1227,7 @@ static const struct driver_info smsc95xx_info = {
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_SEND_ZLP,
};
static const struct usb_device_id products[] = {
@@ -1237,10 +1237,75 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SMSC9505 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9505),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E00),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505A USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E01),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
/* SMSC9512/9514 USB Hub & Ethernet Device */
USB_DEVICE(0x0424, 0xec00),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* SMSC9500 USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9900),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505 USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9901),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9902),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505A USB Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9903),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9512/9514 USB Hub & Ethernet Device (SAL10) */
+ USB_DEVICE(0x0424, 0x9904),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device (HAL) */
+ USB_DEVICE(0x0424, 0x9905),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9505A USB Ethernet Device (HAL) */
+ USB_DEVICE(0x0424, 0x9906),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500 USB Ethernet Device (Alternate ID) */
+ USB_DEVICE(0x0424, 0x9907),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9500A USB Ethernet Device (Alternate ID) */
+ USB_DEVICE(0x0424, 0x9908),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC9512/9514 USB Hub & Ethernet Device (Alternate ID) */
+ USB_DEVICE(0x0424, 0x9909),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 24b36f79515..ca5ca5ae061 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1049,7 +1049,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
* NOTE: strictly conforming cdc-ether devices should expect
* the ZLP here, but ignore the one-byte packet.
*/
- if ((length % dev->maxpacket) == 0) {
+ if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
urb->transfer_buffer_length++;
if (skb_tailroom(skb)) {
skb->data[skb->len] = 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c498d2b043..8d009760277 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
-/* A simple network driver using virtio.
+/* A network driver using virtio.
*
* Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
*
@@ -48,19 +48,9 @@ struct virtnet_info
struct napi_struct napi;
unsigned int status;
- /* The skb we couldn't send because buffers were full. */
- struct sk_buff *last_xmit_skb;
-
- /* If we need to free in a timer, this is it. */
- struct timer_list xmit_free_timer;
-
/* Number of input buffers, and max we've ever had. */
unsigned int num, max;
- /* For cleaning up after transmission. */
- struct tasklet_struct tasklet;
- bool free_in_tasklet;
-
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -78,9 +68,17 @@ struct virtnet_info
struct page *pages;
};
-static inline void *skb_vnet_hdr(struct sk_buff *skb)
+struct skb_vnet_hdr {
+ union {
+ struct virtio_net_hdr hdr;
+ struct virtio_net_hdr_mrg_rxbuf mhdr;
+ };
+ unsigned int num_sg;
+};
+
+static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
- return (struct virtio_net_hdr *)skb->cb;
+ return (struct skb_vnet_hdr *)skb->cb;
}
static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -119,17 +117,13 @@ static void skb_xmit_done(struct virtqueue *svq)
/* We were probably waiting for more output buffers. */
netif_wake_queue(vi->dev);
-
- /* Make sure we re-xmit last_xmit_skb: if there are no more packets
- * queued, start_xmit won't be called. */
- tasklet_schedule(&vi->tasklet);
}
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
int err;
int i;
@@ -140,7 +134,6 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
}
if (vi->mergeable_rx_bufs) {
- struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
unsigned int copy;
char *p = page_address(skb_shinfo(skb)->frags[0].page);
@@ -148,8 +141,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
len = PAGE_SIZE;
len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
- memcpy(hdr, p, sizeof(*mhdr));
- p += sizeof(*mhdr);
+ memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
+ p += sizeof(hdr->mhdr);
copy = len;
if (copy > skb_tailroom(skb))
@@ -164,13 +157,13 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
skb_shinfo(skb)->nr_frags--;
} else {
skb_shinfo(skb)->frags[0].page_offset +=
- sizeof(*mhdr) + copy;
+ sizeof(hdr->mhdr) + copy;
skb_shinfo(skb)->frags[0].size = len;
skb->data_len += len;
skb->len += len;
}
- while (--mhdr->num_buffers) {
+ while (--hdr->mhdr.num_buffers) {
struct sk_buff *nskb;
i = skb_shinfo(skb)->nr_frags;
@@ -184,7 +177,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
if (!nskb) {
pr_debug("%s: rx error: %d buffers missing\n",
- dev->name, mhdr->num_buffers);
+ dev->name, hdr->mhdr.num_buffers);
dev->stats.rx_length_errors++;
goto drop;
}
@@ -205,7 +198,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
skb->len += len;
}
} else {
- len -= sizeof(struct virtio_net_hdr);
+ len -= sizeof(hdr->hdr);
if (len <= MAX_PACKET_LEN)
trim_pages(vi, skb);
@@ -223,9 +216,11 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
- if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
- if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
+ if (!skb_partial_csum_set(skb,
+ hdr->hdr.csum_start,
+ hdr->hdr.csum_offset))
goto frame_err;
}
@@ -233,9 +228,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
ntohs(skb->protocol), skb->len, skb->pkt_type);
- if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
pr_debug("GSO!\n");
- switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
@@ -248,14 +243,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
default:
if (net_ratelimit())
printk(KERN_WARNING "%s: bad gso type %u.\n",
- dev->name, hdr->gso_type);
+ dev->name, hdr->hdr.gso_type);
goto frame_err;
}
- if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
- skb_shinfo(skb)->gso_size = hdr->gso_size;
+ skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
if (skb_shinfo(skb)->gso_size == 0) {
if (net_ratelimit())
printk(KERN_WARNING "%s: zero gso size.\n",
@@ -285,8 +280,8 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
bool oom = false;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
- for (;;) {
- struct virtio_net_hdr *hdr;
+ do {
+ struct skb_vnet_hdr *hdr;
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
if (unlikely(!skb)) {
@@ -298,7 +293,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, hdr, sizeof(*hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -328,7 +323,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
break;
}
vi->num++;
- }
+ } while (err >= num);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
@@ -346,7 +341,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
if (!vi->mergeable_rx_bufs)
return try_fill_recv_maxbufs(vi, gfp);
- for (;;) {
+ do {
skb_frag_t *f;
skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -380,7 +375,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
break;
}
vi->num++;
- }
+ } while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
vi->rvq->vq_ops->kick(vi->rvq);
@@ -448,42 +443,26 @@ again:
return received;
}
-static void free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
struct sk_buff *skb;
- unsigned int len;
+ unsigned int len, tot_sgs = 0;
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
__skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
+ tot_sgs += skb_vnet_hdr(skb)->num_sg;
kfree_skb(skb);
}
-}
-
-/* If the virtio transport doesn't always notify us when all in-flight packets
- * are consumed, we fall back to using this function on a timer to free them. */
-static void xmit_free(unsigned long data)
-{
- struct virtnet_info *vi = (void *)data;
-
- netif_tx_lock(vi->dev);
-
- free_old_xmit_skbs(vi);
-
- if (!skb_queue_empty(&vi->send))
- mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
- netif_tx_unlock(vi->dev);
+ return tot_sgs;
}
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
- int num, err;
struct scatterlist sg[2+MAX_SKB_FRAGS];
- struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
- struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -491,108 +470,89 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->csum_start = skb->csum_start - skb_headroom(skb);
- hdr->csum_offset = skb->csum_offset;
+ hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+ hdr->hdr.csum_offset = skb->csum_offset;
} else {
- hdr->flags = 0;
- hdr->csum_offset = hdr->csum_start = 0;
+ hdr->hdr.flags = 0;
+ hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
}
if (skb_is_gso(skb)) {
- hdr->hdr_len = skb_headlen(skb);
- hdr->gso_size = skb_shinfo(skb)->gso_size;
+ hdr->hdr.hdr_len = skb_headlen(skb);
+ hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
- hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
else
BUG();
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
- hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
} else {
- hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
- hdr->gso_size = hdr->hdr_len = 0;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+ hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
}
- mhdr->num_buffers = 0;
+ hdr->mhdr.num_buffers = 0;
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, mhdr, sizeof(*mhdr));
+ sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
else
- sg_set_buf(sg, hdr, sizeof(*hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
- num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-
- err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
- if (err >= 0 && !vi->free_in_tasklet)
- mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
- return err;
-}
-
-static void xmit_tasklet(unsigned long data)
-{
- struct virtnet_info *vi = (void *)data;
-
- netif_tx_lock_bh(vi->dev);
- if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
- vi->svq->vq_ops->kick(vi->svq);
- vi->last_xmit_skb = NULL;
- }
- if (vi->free_in_tasklet)
- free_old_xmit_skbs(vi);
- netif_tx_unlock_bh(vi->dev);
+ hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+ return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ int capacity;
again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
- /* If we has a buffer left over from last time, send it now. */
- if (unlikely(vi->last_xmit_skb) &&
- xmit_skb(vi, vi->last_xmit_skb) < 0)
- goto stop_queue;
-
- vi->last_xmit_skb = NULL;
-
/* Put new one in send queue and do transmit */
- if (likely(skb)) {
- __skb_queue_head(&vi->send, skb);
- if (xmit_skb(vi, skb) < 0) {
- vi->last_xmit_skb = skb;
- skb = NULL;
- goto stop_queue;
+ __skb_queue_head(&vi->send, skb);
+ capacity = xmit_skb(vi, skb);
+
+ /* This can happen with OOM and indirect buffers. */
+ if (unlikely(capacity < 0)) {
+ netif_stop_queue(dev);
+ dev_warn(&dev->dev, "Unexpected full queue\n");
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ netif_start_queue(dev);
+ goto again;
}
+ return NETDEV_TX_BUSY;
}
-done:
- vi->svq->vq_ops->kick(vi->svq);
- return NETDEV_TX_OK;
-stop_queue:
- pr_debug("%s: virtio not prepared to send\n", dev->name);
- netif_stop_queue(dev);
-
- /* Activate callback for using skbs: if this returns false it
- * means some were used in the meantime. */
- if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
- vi->svq->vq_ops->disable_cb(vi->svq);
- netif_start_queue(dev);
- goto again;
- }
- if (skb) {
- /* Drop this skb: we only queue one. */
- vi->dev->stats.tx_dropped++;
- kfree_skb(skb);
+ vi->svq->vq_ops->kick(vi->svq);
+ /* Don't wait up for transmitted skbs to be freed. */
+ skb_orphan(skb);
+ nf_reset(skb);
+
+ /* Apparently nice girls don't return TX_BUSY; stop the queue
+ * before it gets out of hand. Naturally, this wastes entries. */
+ if (capacity < 2+MAX_SKB_FRAGS) {
+ netif_stop_queue(dev);
+ if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+ /* More just got used, free them then recheck. */
+ capacity += free_old_xmit_skbs(vi);
+ if (capacity >= 2+MAX_SKB_FRAGS) {
+ netif_start_queue(dev);
+ vi->svq->vq_ops->disable_cb(vi->svq);
+ }
+ }
}
- goto done;
+
+ return NETDEV_TX_OK;
}
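
The 2+MAX_SKB_FRAGS threshold above comes from the worst-case scatter-gather demand of one skb: one entry for the virtio header, one for the linear area, and one per page fragment. A standalone sketch of that arithmetic, with MAX_SKB_FRAGS spelled out for 4 KiB pages as it was defined in kernels of this era (illustrative only):

#include <stdio.h>

#define PAGE_SIZE	4096			/* assumed: 4 KiB pages */
#define MAX_SKB_FRAGS	(65536 / PAGE_SIZE + 2)	/* kernel definition of the time */

int main(void)
{
	/* 1 header entry + 1 linear entry + MAX_SKB_FRAGS fragment entries */
	printf("worst-case sg entries per skb = %d\n", 2 + MAX_SKB_FRAGS);
	return 0;	/* prints 20 with 4 KiB pages */
}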
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -925,10 +885,6 @@ static int virtnet_probe(struct virtio_device *vdev)
vi->pages = NULL;
INIT_DELAYED_WORK(&vi->refill, refill_work);
- /* If they give us a callback when all buffers are done, we don't need
- * the timer. */
- vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
-
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
|| virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -960,11 +916,6 @@ static int virtnet_probe(struct virtio_device *vdev)
skb_queue_head_init(&vi->recv);
skb_queue_head_init(&vi->send);
- tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
-
- if (!vi->free_in_tasklet)
- setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -997,7 +948,7 @@ free:
return err;
}
-static void virtnet_remove(struct virtio_device *vdev)
+static void __devexit virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
struct sk_buff *skb;
@@ -1005,9 +956,6 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- if (!vi->free_in_tasklet)
- del_timer_sync(&vi->xmit_free_timer);
-
/* Free our skbs in send and recv queues, if any. */
while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
kfree_skb(skb);
@@ -1041,7 +989,6 @@ static unsigned int features[] = {
VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
- VIRTIO_F_NOTIFY_ON_EMPTY,
};
static struct virtio_driver virtio_net = {
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
new file mode 100644
index 00000000000..880f5098eac
--- /dev/null
+++ b/drivers/net/vmxnet3/Makefile
@@ -0,0 +1,35 @@
+################################################################################
+#
+# Linux driver for VMware's vmxnet3 ethernet NIC.
+#
+# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; version 2 of the License and no later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+# NON INFRINGEMENT. See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+#
+#
+################################################################################
+
+#
+# Makefile for the VMware vmxnet3 ethernet NIC driver
+#
+
+obj-$(CONFIG_VMXNET3) += vmxnet3.o
+
+vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
new file mode 100644
index 00000000000..37108fb226d
--- /dev/null
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -0,0 +1,96 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ *
+ */
+
+#ifndef _UPT1_DEFS_H
+#define _UPT1_DEFS_H
+
+struct UPT1_TxStats {
+ u64 TSOPktsTxOK; /* TSO pkts post-segmentation */
+ u64 TSOBytesTxOK;
+ u64 ucastPktsTxOK;
+ u64 ucastBytesTxOK;
+ u64 mcastPktsTxOK;
+ u64 mcastBytesTxOK;
+ u64 bcastPktsTxOK;
+ u64 bcastBytesTxOK;
+ u64 pktsTxError;
+ u64 pktsTxDiscard;
+};
+
+struct UPT1_RxStats {
+ u64 LROPktsRxOK; /* LRO pkts */
+ u64 LROBytesRxOK; /* bytes from LRO pkts */
+ /* the following counters are for pkts from the wire, i.e., pre-LRO */
+ u64 ucastPktsRxOK;
+ u64 ucastBytesRxOK;
+ u64 mcastPktsRxOK;
+ u64 mcastBytesRxOK;
+ u64 bcastPktsRxOK;
+ u64 bcastBytesRxOK;
+ u64 pktsRxOutOfBuf;
+ u64 pktsRxError;
+};
+
+/* interrupt moderation level */
+enum {
+ UPT1_IML_NONE = 0, /* no interrupt moderation */
+ UPT1_IML_HIGHEST = 7, /* least intr generated */
+	UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */
+};
+/* values for UPT1_RSSConf.hashFunc */
+enum {
+ UPT1_RSS_HASH_TYPE_NONE = 0x0,
+ UPT1_RSS_HASH_TYPE_IPV4 = 0x01,
+ UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02,
+ UPT1_RSS_HASH_TYPE_IPV6 = 0x04,
+ UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08,
+};
+
+enum {
+ UPT1_RSS_HASH_FUNC_NONE = 0x0,
+ UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01,
+};
+
+#define UPT1_RSS_MAX_KEY_SIZE 40
+#define UPT1_RSS_MAX_IND_TABLE_SIZE 128
+
+struct UPT1_RSSConf {
+ u16 hashType;
+ u16 hashFunc;
+ u16 hashKeySize;
+ u16 indTableSize;
+ u8 hashKey[UPT1_RSS_MAX_KEY_SIZE];
+ u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE];
+};
+
+/* features */
+enum {
+ UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
+ UPT1_F_RSS = 0x0002,
+ UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
+ UPT1_F_LRO = 0x0008,
+};
+#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
new file mode 100644
index 00000000000..dc8ee4438a4
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -0,0 +1,535 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ *
+ */
+
+#ifndef _VMXNET3_DEFS_H_
+#define _VMXNET3_DEFS_H_
+
+#include "upt1_defs.h"
+
+/* all registers are 32 bit wide */
+/* BAR 1 */
+enum {
+ VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */
+ VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */
+ VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */
+ VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */
+ VMXNET3_REG_CMD = 0x20, /* Command */
+ VMXNET3_REG_MACL = 0x28, /* MAC Address Low */
+ VMXNET3_REG_MACH = 0x30, /* MAC Address High */
+ VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */
+ VMXNET3_REG_ECR = 0x40 /* Event Cause Register */
+};
+
+/* BAR 0 */
+enum {
+ VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */
+ VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */
+ VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */
+ VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */
+};
+
+#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */
+#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */
+
+#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */
+#define VMXNET3_REG_ALIGN_MASK 0x7
+
+/* I/O Mapped access to registers */
+#define VMXNET3_IO_TYPE_PT 0
+#define VMXNET3_IO_TYPE_VD 1
+#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF))
+#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
+#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
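
The three macros pack a register type and offset into one value and pull them back apart. A quick user-space sketch of the round trip (the macro bodies are copied from above; the example values are illustrative):

#include <stdio.h>

#define VMXNET3_IO_TYPE_PT		0
#define VMXNET3_IO_TYPE_VD		1
#define VMXNET3_IO_ADDR(type, reg)	(((type) << 24) | ((reg) & 0xFFFFFF))
#define VMXNET3_IO_TYPE(addr)		((addr) >> 24)
#define VMXNET3_IO_REG(addr)		((addr) & 0xFFFFFF)

int main(void)
{
	/* 0x600 is the BAR 0 Tx producer register (VMXNET3_REG_TXPROD) */
	unsigned int addr = VMXNET3_IO_ADDR(VMXNET3_IO_TYPE_PT, 0x600);

	printf("addr=0x%08x type=%u reg=0x%x\n",
	       addr, VMXNET3_IO_TYPE(addr), VMXNET3_IO_REG(addr));
	return 0;	/* prints addr=0x00000600 type=0 reg=0x600 */
}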
+
+enum {
+ VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
+ VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
+ VMXNET3_CMD_QUIESCE_DEV,
+ VMXNET3_CMD_RESET_DEV,
+ VMXNET3_CMD_UPDATE_RX_MODE,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS,
+ VMXNET3_CMD_UPDATE_RSSIDT,
+ VMXNET3_CMD_UPDATE_IML,
+ VMXNET3_CMD_UPDATE_PMCFG,
+ VMXNET3_CMD_UPDATE_FEATURE,
+ VMXNET3_CMD_LOAD_PLUGIN,
+
+ VMXNET3_CMD_FIRST_GET = 0xF00D0000,
+ VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
+ VMXNET3_CMD_GET_STATS,
+ VMXNET3_CMD_GET_LINK,
+ VMXNET3_CMD_GET_PERM_MAC_LO,
+ VMXNET3_CMD_GET_PERM_MAC_HI,
+ VMXNET3_CMD_GET_DID_LO,
+ VMXNET3_CMD_GET_DID_HI,
+ VMXNET3_CMD_GET_DEV_EXTRA_INFO,
+ VMXNET3_CMD_GET_CONF_INTR
+};
+
+struct Vmxnet3_TxDesc {
+ u64 addr;
+
+ u32 len:14;
+ u32 gen:1; /* generation bit */
+ u32 rsvd:1;
+ u32 dtype:1; /* descriptor type */
+ u32 ext1:1;
+ u32 msscof:14; /* MSS, checksum offset, flags */
+
+ u32 hlen:10; /* header len */
+ u32 om:2; /* offload mode */
+ u32 eop:1; /* End Of Packet */
+ u32 cq:1; /* completion request */
+ u32 ext2:1;
+ u32 ti:1; /* VLAN Tag Insertion */
+ u32 tci:16; /* Tag to Insert */
+};
+
+/* TxDesc.OM values */
+#define VMXNET3_OM_NONE 0
+#define VMXNET3_OM_CSUM 2
+#define VMXNET3_OM_TSO 3
+
+/* fields in TxDesc we access w/o using bit fields */
+#define VMXNET3_TXD_EOP_SHIFT 12
+#define VMXNET3_TXD_CQ_SHIFT 13
+#define VMXNET3_TXD_GEN_SHIFT 14
+
+#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
+#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
+#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT)
+
+#define VMXNET3_HDR_COPY_SIZE 128
+
+
+struct Vmxnet3_TxDataDesc {
+ u8 data[VMXNET3_HDR_COPY_SIZE];
+};
+
+
+struct Vmxnet3_TxCompDesc {
+ u32 txdIdx:12; /* Index of the EOP TxDesc */
+ u32 ext1:20;
+
+ u32 ext2;
+ u32 ext3;
+
+ u32 rsvd:24;
+ u32 type:7; /* completion type */
+ u32 gen:1; /* generation bit */
+};
+
+
+struct Vmxnet3_RxDesc {
+ u64 addr;
+
+ u32 len:14;
+ u32 btype:1; /* Buffer Type */
+ u32 dtype:1; /* Descriptor type */
+ u32 rsvd:15;
+ u32 gen:1; /* Generation bit */
+
+ u32 ext1;
+};
+
+/* values of RXD.BTYPE */
+#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */
+#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */
+
+/* fields in RxDesc we access w/o using bit fields */
+#define VMXNET3_RXD_BTYPE_SHIFT 14
+#define VMXNET3_RXD_GEN_SHIFT 31
+
+
+struct Vmxnet3_RxCompDesc {
+ u32 rxdIdx:12; /* Index of the RxDesc */
+ u32 ext1:2;
+ u32 eop:1; /* End of Packet */
+ u32 sop:1; /* Start of Packet */
+ u32 rqID:10; /* rx queue/ring ID */
+ u32 rssType:4; /* RSS hash type used */
+ u32 cnc:1; /* Checksum Not Calculated */
+ u32 ext2:1;
+
+ u32 rssHash; /* RSS hash value */
+
+ u32 len:14; /* data length */
+ u32 err:1; /* Error */
+ u32 ts:1; /* Tag is stripped */
+ u32 tci:16; /* Tag stripped */
+
+ u32 csum:16;
+ u32 tuc:1; /* TCP/UDP Checksum Correct */
+ u32 udp:1; /* UDP packet */
+ u32 tcp:1; /* TCP packet */
+ u32 ipc:1; /* IP Checksum Correct */
+ u32 v6:1; /* IPv6 */
+ u32 v4:1; /* IPv4 */
+ u32 frg:1; /* IP Fragment */
+ u32 fcs:1; /* Frame CRC correct */
+ u32 type:7; /* completion type */
+ u32 gen:1; /* generation bit */
+};
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
+#define VMXNET3_RCD_TUC_SHIFT 16
+#define VMXNET3_RCD_IPC_SHIFT 19
+
+/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */
+#define VMXNET3_RCD_TYPE_SHIFT 56
+#define VMXNET3_RCD_GEN_SHIFT 63
+
+/* csum OK for TCP/UDP pkts over IP */
+#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
+ 1 << VMXNET3_RCD_IPC_SHIFT)
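
VMXNET3_RCD_CSUM_OK combines the TUC and IPC bits so a single mask test says both the L4 and the IP checksum were verified. A small sketch of the test on a sample dword[3] value (the value is made up for illustration):

#include <stdio.h>

#define VMXNET3_RCD_TUC_SHIFT	16
#define VMXNET3_RCD_IPC_SHIFT	19
#define VMXNET3_RCD_CSUM_OK	(1 << VMXNET3_RCD_TUC_SHIFT | \
				 1 << VMXNET3_RCD_IPC_SHIFT)

int main(void)
{
	unsigned int dword3 = 0x00090000;	/* sample: TUC and IPC both set */

	printf("mask=0x%x both_ok=%d\n", VMXNET3_RCD_CSUM_OK,
	       (dword3 & VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK);
	return 0;	/* prints mask=0x90000 both_ok=1 */
}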
+
+/* value of RxCompDesc.rssType */
+enum {
+ VMXNET3_RCD_RSS_TYPE_NONE = 0,
+ VMXNET3_RCD_RSS_TYPE_IPV4 = 1,
+ VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2,
+ VMXNET3_RCD_RSS_TYPE_IPV6 = 3,
+ VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4,
+};
+
+
+/* a union for accessing all cmd/completion descriptors */
+union Vmxnet3_GenericDesc {
+ u64 qword[2];
+ u32 dword[4];
+ u16 word[8];
+ struct Vmxnet3_TxDesc txd;
+ struct Vmxnet3_RxDesc rxd;
+ struct Vmxnet3_TxCompDesc tcd;
+ struct Vmxnet3_RxCompDesc rcd;
+};
+
+#define VMXNET3_INIT_GEN 1
+
+/* Max size of a single tx buffer */
+#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14)
+
+/* # of tx desc needed for a tx buffer size */
+#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
+ VMXNET3_MAX_TX_BUF_SIZE)
+
+/* max # of tx descs for a non-tso pkt */
+#define VMXNET3_MAX_TXD_PER_PKT 16
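
VMXNET3_TXD_NEEDED is a ceiling division: each tx descriptor covers at most 2^14 bytes, so a buffer larger than that spans several descriptors. A worked example (values chosen for illustration):

#include <stdio.h>

#define VMXNET3_MAX_TX_BUF_SIZE	 (1 << 14)
#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \
				  VMXNET3_MAX_TX_BUF_SIZE)

int main(void)
{
	/* ceiling division: 1 and 16384 bytes need one desc, 40000 need three */
	printf("%d %d %d\n",
	       VMXNET3_TXD_NEEDED(1),
	       VMXNET3_TXD_NEEDED(16384),
	       VMXNET3_TXD_NEEDED(40000));
	return 0;	/* prints: 1 1 3 */
}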
+
+/* Max size of a single rx buffer */
+#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1)
+/* Minimum size of a type 0 buffer */
+#define VMXNET3_MIN_T0_BUF_SIZE 128
+#define VMXNET3_MAX_CSUM_OFFSET 1024
+
+/* Ring base address alignment */
+#define VMXNET3_RING_BA_ALIGN 512
+#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1)
+
+/* Ring size must be a multiple of 32 */
+#define VMXNET3_RING_SIZE_ALIGN 32
+#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
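
Keeping the alignment as a power of two plus a mask makes rounding a requested ring size up to the next multiple of 32 a one-liner. A sketch of one plausible use of the mask (the rounding expression is the usual idiom, not code from this patch):

#include <stdio.h>

#define VMXNET3_RING_SIZE_ALIGN	32
#define VMXNET3_RING_SIZE_MASK	(VMXNET3_RING_SIZE_ALIGN - 1)

int main(void)
{
	unsigned int requested = 100;
	/* round up to the next multiple of VMXNET3_RING_SIZE_ALIGN */
	unsigned int rounded = (requested + VMXNET3_RING_SIZE_MASK) &
			       ~VMXNET3_RING_SIZE_MASK;

	printf("%u -> %u\n", requested, rounded);	/* prints 100 -> 128 */
	return 0;
}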
+
+/* Max ring size */
+#define VMXNET3_TX_RING_MAX_SIZE 4096
+#define VMXNET3_TC_RING_MAX_SIZE 4096
+#define VMXNET3_RX_RING_MAX_SIZE 4096
+#define VMXNET3_RC_RING_MAX_SIZE 8192
+
+/* a list of reasons for queue stop */
+
+enum {
+ VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */
+ VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */
+ VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */
+ VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */
+ VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */
+ VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */
+ VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */
+ VMXNET3_ERR_TXD_INVALID = 0x80000007, /* invalid TxDesc */
+};
+
+/* completion descriptor types */
+#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */
+#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */
+
+enum {
+ VMXNET3_GOS_BITS_UNK = 0, /* unknown */
+ VMXNET3_GOS_BITS_32 = 1,
+ VMXNET3_GOS_BITS_64 = 2,
+};
+
+#define VMXNET3_GOS_TYPE_LINUX 1
+
+
+struct Vmxnet3_GOSInfo {
+ u32 gosBits:2; /* 32-bit or 64-bit? */
+ u32 gosType:4; /* which guest */
+ u32 gosVer:16; /* gos version */
+ u32 gosMisc:10; /* other info about gos */
+};
+
+
+struct Vmxnet3_DriverInfo {
+ u32 version;
+ struct Vmxnet3_GOSInfo gos;
+ u32 vmxnet3RevSpt;
+ u32 uptVerSpt;
+};
+
+
+#define VMXNET3_REV1_MAGIC 0xbabefee1
+
+/*
+ * QueueDescPA must be 128-byte aligned. It points to an array of
+ * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc.
+ * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc is specified by
+ * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively.
+ */
+#define VMXNET3_QUEUE_DESC_ALIGN 128
+
+
+struct Vmxnet3_MiscConf {
+ struct Vmxnet3_DriverInfo driverInfo;
+ u64 uptFeatures;
+ u64 ddPA; /* driver data PA */
+ u64 queueDescPA; /* queue descriptor table PA */
+ u32 ddLen; /* driver data len */
+ u32 queueDescLen; /* queue desc. table len in bytes */
+ u32 mtu;
+ u16 maxNumRxSG;
+ u8 numTxQueues;
+ u8 numRxQueues;
+ u32 reserved[4];
+};
+
+
+struct Vmxnet3_TxQueueConf {
+ u64 txRingBasePA;
+ u64 dataRingBasePA;
+ u64 compRingBasePA;
+ u64 ddPA; /* driver data */
+ u64 reserved;
+ u32 txRingSize; /* # of tx desc */
+ u32 dataRingSize; /* # of data desc */
+ u32 compRingSize; /* # of comp desc */
+ u32 ddLen; /* size of driver data */
+ u8 intrIdx;
+ u8 _pad[7];
+};
+
+
+struct Vmxnet3_RxQueueConf {
+ u64 rxRingBasePA[2];
+ u64 compRingBasePA;
+ u64 ddPA; /* driver data */
+ u64 reserved;
+ u32 rxRingSize[2]; /* # of rx desc */
+ u32 compRingSize; /* # of rx comp desc */
+ u32 ddLen; /* size of driver data */
+ u8 intrIdx;
+ u8 _pad[7];
+};
+
+
+enum vmxnet3_intr_mask_mode {
+ VMXNET3_IMM_AUTO = 0,
+ VMXNET3_IMM_ACTIVE = 1,
+ VMXNET3_IMM_LAZY = 2
+};
+
+enum vmxnet3_intr_type {
+ VMXNET3_IT_AUTO = 0,
+ VMXNET3_IT_INTX = 1,
+ VMXNET3_IT_MSI = 2,
+ VMXNET3_IT_MSIX = 3
+};
+
+#define VMXNET3_MAX_TX_QUEUES 8
+#define VMXNET3_MAX_RX_QUEUES 16
+/* additional 1 for events */
+#define VMXNET3_MAX_INTRS 25
+
+
+struct Vmxnet3_IntrConf {
+ bool autoMask;
+ u8 numIntrs; /* # of interrupts */
+ u8 eventIntrIdx;
+ u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
+ * each intr */
+ u32 reserved[3];
+};
+
+/* one bit per VLAN ID, the size is in the units of u32 */
+#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8))
+
+
+struct Vmxnet3_QueueStatus {
+ bool stopped;
+ u8 _pad[3];
+ u32 error;
+};
+
+
+struct Vmxnet3_TxQueueCtrl {
+ u32 txNumDeferred;
+ u32 txThreshold;
+ u64 reserved;
+};
+
+
+struct Vmxnet3_RxQueueCtrl {
+ bool updateRxProd;
+ u8 _pad[7];
+ u64 reserved;
+};
+
+enum {
+ VMXNET3_RXM_UCAST = 0x01, /* unicast only */
+ VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */
+ VMXNET3_RXM_BCAST = 0x04, /* broadcast only */
+ VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */
+ VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */
+};
+
+struct Vmxnet3_RxFilterConf {
+ u32 rxMode; /* VMXNET3_RXM_xxx */
+ u16 mfTableLen; /* size of the multicast filter table */
+ u16 _pad1;
+ u64 mfTablePA; /* PA of the multicast filters table */
+ u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
+};
+
+
+#define VMXNET3_PM_MAX_FILTERS 6
+#define VMXNET3_PM_MAX_PATTERN_SIZE 128
+#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
+
+#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
+ * filters */
+
+
+struct Vmxnet3_PM_PktFilter {
+ u8 maskSize;
+ u8 patternSize;
+ u8 mask[VMXNET3_PM_MAX_MASK_SIZE];
+ u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE];
+ u8 pad[6];
+};
+
+
+struct Vmxnet3_PMConf {
+ u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
+ u8 numFilters;
+ u8 pad[5];
+ struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
+};
+
+
+struct Vmxnet3_VariableLenConfDesc {
+ u32 confVer;
+ u32 confLen;
+ u64 confPA;
+};
+
+
+struct Vmxnet3_TxQueueDesc {
+ struct Vmxnet3_TxQueueCtrl ctrl;
+ struct Vmxnet3_TxQueueConf conf;
+
+ /* Driver read after a GET command */
+ struct Vmxnet3_QueueStatus status;
+ struct UPT1_TxStats stats;
+ u8 _pad[88]; /* 128 aligned */
+};
+
+
+struct Vmxnet3_RxQueueDesc {
+ struct Vmxnet3_RxQueueCtrl ctrl;
+ struct Vmxnet3_RxQueueConf conf;
+	/* Driver read after a GET command */
+ struct Vmxnet3_QueueStatus status;
+ struct UPT1_RxStats stats;
+ u8 __pad[88]; /* 128 aligned */
+};
+
+
+struct Vmxnet3_DSDevRead {
+ /* read-only region for device, read by dev in response to a SET cmd */
+ struct Vmxnet3_MiscConf misc;
+ struct Vmxnet3_IntrConf intrConf;
+ struct Vmxnet3_RxFilterConf rxFilterConf;
+ struct Vmxnet3_VariableLenConfDesc rssConfDesc;
+ struct Vmxnet3_VariableLenConfDesc pmConfDesc;
+ struct Vmxnet3_VariableLenConfDesc pluginConfDesc;
+};
+
+/* All structures in DriverShared are padded to multiples of 8 bytes */
+struct Vmxnet3_DriverShared {
+ u32 magic;
+ /* make devRead start at 64bit boundaries */
+ u32 pad;
+ struct Vmxnet3_DSDevRead devRead;
+ u32 ecr;
+ u32 reserved[5];
+};
+
+
+#define VMXNET3_ECR_RQERR (1 << 0)
+#define VMXNET3_ECR_TQERR (1 << 1)
+#define VMXNET3_ECR_LINK (1 << 2)
+#define VMXNET3_ECR_DIC (1 << 3)
+#define VMXNET3_ECR_DEBUG (1 << 4)
+
+/* flip the gen bit of a ring */
+#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)
+
+/* only use this if moving the idx won't affect the gen bit */
+#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
+ do {\
+ (idx)++;\
+ if (unlikely((idx) == (ring_size))) {\
+ (idx) = 0;\
+ } \
+ } while (0)
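
The two helpers above implement the usual generation-bit ring bookkeeping: the index wraps silently, and the caller flips the gen bit whenever a wrap happens so producer and consumer can tell a full ring from a stale one. A minimal user-space walk of one lap (macro bodies copied from above, the loop itself is illustrative):

#include <stdio.h>

#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1)

#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \
	do {					  \
		(idx)++;			  \
		if ((idx) == (ring_size))	  \
			(idx) = 0;		  \
	} while (0)

int main(void)
{
	unsigned int idx = 0, gen = 1, i;	/* gen starts at VMXNET3_INIT_GEN */
	const unsigned int ring_size = 4;

	for (i = 0; i < ring_size; i++) {
		VMXNET3_INC_RING_IDX_ONLY(idx, ring_size);
		if (idx == 0)			/* wrapped: flip the gen bit */
			VMXNET3_FLIP_RING_GEN(gen);
	}
	printf("idx=%u gen=%u\n", idx, gen);	/* prints idx=0 gen=0 */
	return 0;
}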
+
+#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \
+ (vfTable[vid >> 5] |= (1 << (vid & 31)))
+#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \
+ (vfTable[vid >> 5] &= ~(1 << (vid & 31)))
+
+#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \
+ ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)
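
The VLAN filter table is a plain 4096-bit bitmap: vid >> 5 picks the u32 word and vid & 31 the bit within it. A standalone sketch of setting and testing one VLAN ID (u32 replaced by uint32_t so the snippet compiles outside the kernel):

#include <stdio.h>
#include <stdint.h>

#define VFT_SIZE	(4096 / (sizeof(uint32_t) * 8))	/* 128 words */

#define SET_VFTABLE_ENTRY(vfTable, vid) \
	((vfTable)[(vid) >> 5] |= (1U << ((vid) & 31)))
#define VFTABLE_ENTRY_IS_SET(vfTable, vid) \
	(((vfTable)[(vid) >> 5] & (1U << ((vid) & 31))) != 0)

int main(void)
{
	uint32_t vft[VFT_SIZE] = { 0 };

	SET_VFTABLE_ENTRY(vft, 100);	/* VLAN 100: word 3, bit 4 */
	printf("vft[3]=0x%08x set=%d\n", vft[3],
	       VFTABLE_ENTRY_IS_SET(vft, 100));
	return 0;	/* prints vft[3]=0x00000010 set=1 */
}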
+
+#define VMXNET3_MAX_MTU 9000
+#define VMXNET3_MIN_MTU 60
+
+#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */
+#define VMXNET3_LINK_DOWN 0
+
+#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
new file mode 100644
index 00000000000..6a16f76f277
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -0,0 +1,2565 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ *
+ */
+
+#include "vmxnet3_int.h"
+
+char vmxnet3_driver_name[] = "vmxnet3";
+#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
+
+
+/*
+ * PCI Device ID Table
+ * Last entry must be all 0s
+ */
+static const struct pci_device_id vmxnet3_pciid_table[] = {
+ {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
+
+static atomic_t devices_found;
+
+
+/*
+ * Enable/Disable the given intr
+ */
+static void
+vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
+{
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
+}
+
+
+static void
+vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
+{
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
+}
+
+
+/*
+ * Enable/Disable all intrs used by the device
+ */
+static void
+vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ vmxnet3_enable_intr(adapter, i);
+}
+
+
+static void
+vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ vmxnet3_disable_intr(adapter, i);
+}
+
+
+static void
+vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
+{
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
+}
+
+
+static bool
+vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ return netif_queue_stopped(adapter->netdev);
+}
+
+
+static void
+vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ tq->stopped = false;
+ netif_start_queue(adapter->netdev);
+}
+
+
+static void
+vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ tq->stopped = false;
+ netif_wake_queue(adapter->netdev);
+}
+
+
+static void
+vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
+{
+ tq->stopped = true;
+ tq->num_stop++;
+ netif_stop_queue(adapter->netdev);
+}
+
+
+/*
+ * Check the link state. This may start or stop the tx queue.
+ */
+static void
+vmxnet3_check_link(struct vmxnet3_adapter *adapter)
+{
+ u32 ret;
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ adapter->link_speed = ret >> 16;
+ if (ret & 1) { /* Link is up. */
+ printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
+ adapter->netdev->name, adapter->link_speed);
+ if (!netif_carrier_ok(adapter->netdev))
+ netif_carrier_on(adapter->netdev);
+
+ vmxnet3_tq_start(&adapter->tx_queue, adapter);
+ } else {
+ printk(KERN_INFO "%s: NIC Link is Down\n",
+ adapter->netdev->name);
+ if (netif_carrier_ok(adapter->netdev))
+ netif_carrier_off(adapter->netdev);
+
+ vmxnet3_tq_stop(&adapter->tx_queue, adapter);
+ }
+}
+
+
+static void
+vmxnet3_process_events(struct vmxnet3_adapter *adapter)
+{
+ u32 events = adapter->shared->ecr;
+ if (!events)
+ return;
+
+ vmxnet3_ack_events(adapter, events);
+
+ /* Check if link state has changed */
+ if (events & VMXNET3_ECR_LINK)
+ vmxnet3_check_link(adapter);
+
+ /* Check if there is an error on xmit/recv queues */
+ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_QUEUE_STATUS);
+
+ if (adapter->tqd_start->status.stopped) {
+ printk(KERN_ERR "%s: tq error 0x%x\n",
+ adapter->netdev->name,
+ adapter->tqd_start->status.error);
+ }
+ if (adapter->rqd_start->status.stopped) {
+ printk(KERN_ERR "%s: rq error 0x%x\n",
+ adapter->netdev->name,
+ adapter->rqd_start->status.error);
+ }
+
+ schedule_work(&adapter->work);
+ }
+}
+
+
+static void
+vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
+ struct pci_dev *pdev)
+{
+ if (tbi->map_type == VMXNET3_MAP_SINGLE)
+ pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
+ PCI_DMA_TODEVICE);
+ else if (tbi->map_type == VMXNET3_MAP_PAGE)
+ pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
+ PCI_DMA_TODEVICE);
+ else
+ BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
+
+ tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
+}
+
+
+static int
+vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
+ struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
+{
+ struct sk_buff *skb;
+ int entries = 0;
+
+ /* no out of order completion */
+ BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
+ BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);
+
+ skb = tq->buf_info[eop_idx].skb;
+ BUG_ON(skb == NULL);
+ tq->buf_info[eop_idx].skb = NULL;
+
+ VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
+
+ while (tq->tx_ring.next2comp != eop_idx) {
+ vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
+ pdev);
+
+ /* update next2comp w/o tx_lock. Since we are marking more,
+		 * instead of fewer, tx ring entries avail, the worst case is
+ * that the tx routine incorrectly re-queues a pkt due to
+ * insufficient tx ring entries.
+ */
+ vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
+ entries++;
+ }
+
+ dev_kfree_skb_any(skb);
+ return entries;
+}
+
+
+static int
+vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ int completed = 0;
+ union Vmxnet3_GenericDesc *gdesc;
+
+ gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
+ while (gdesc->tcd.gen == tq->comp_ring.gen) {
+ completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
+ adapter->pdev, adapter);
+
+ vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
+ gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
+ }
+
+ if (completed) {
+ spin_lock(&tq->tx_lock);
+ if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
+ vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
+ VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
+ netif_carrier_ok(adapter->netdev))) {
+ vmxnet3_tq_wake(tq, adapter);
+ }
+ spin_unlock(&tq->tx_lock);
+ }
+ return completed;
+}
+
+
+static void
+vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
+ struct vmxnet3_tx_buf_info *tbi;
+ union Vmxnet3_GenericDesc *gdesc;
+
+ tbi = tq->buf_info + tq->tx_ring.next2comp;
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;
+
+ vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
+ if (tbi->skb) {
+ dev_kfree_skb_any(tbi->skb);
+ tbi->skb = NULL;
+ }
+ vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
+ }
+
+ /* sanity check, verify all buffers are indeed unmapped and freed */
+ for (i = 0; i < tq->tx_ring.size; i++) {
+ BUG_ON(tq->buf_info[i].skb != NULL ||
+ tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
+ }
+
+ tq->tx_ring.gen = VMXNET3_INIT_GEN;
+ tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
+
+ tq->comp_ring.gen = VMXNET3_INIT_GEN;
+ tq->comp_ring.next2proc = 0;
+}
+
+
+void
+vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ if (tq->tx_ring.base) {
+ pci_free_consistent(adapter->pdev, tq->tx_ring.size *
+ sizeof(struct Vmxnet3_TxDesc),
+ tq->tx_ring.base, tq->tx_ring.basePA);
+ tq->tx_ring.base = NULL;
+ }
+ if (tq->data_ring.base) {
+ pci_free_consistent(adapter->pdev, tq->data_ring.size *
+ sizeof(struct Vmxnet3_TxDataDesc),
+ tq->data_ring.base, tq->data_ring.basePA);
+ tq->data_ring.base = NULL;
+ }
+ if (tq->comp_ring.base) {
+ pci_free_consistent(adapter->pdev, tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc),
+ tq->comp_ring.base, tq->comp_ring.basePA);
+ tq->comp_ring.base = NULL;
+ }
+ kfree(tq->buf_info);
+ tq->buf_info = NULL;
+}
+
+
+static void
+vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ /* reset the tx ring contents to 0 and reset the tx ring states */
+ memset(tq->tx_ring.base, 0, tq->tx_ring.size *
+ sizeof(struct Vmxnet3_TxDesc));
+ tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
+ tq->tx_ring.gen = VMXNET3_INIT_GEN;
+
+ memset(tq->data_ring.base, 0, tq->data_ring.size *
+ sizeof(struct Vmxnet3_TxDataDesc));
+
+ /* reset the tx comp ring contents to 0 and reset comp ring states */
+ memset(tq->comp_ring.base, 0, tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc));
+ tq->comp_ring.next2proc = 0;
+ tq->comp_ring.gen = VMXNET3_INIT_GEN;
+
+ /* reset the bookkeeping data */
+ memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
+ for (i = 0; i < tq->tx_ring.size; i++)
+ tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
+
+ /* stats are not reset */
+}
+
+
+static int
+vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter)
+{
+ BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
+ tq->comp_ring.base || tq->buf_info);
+
+ tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
+ * sizeof(struct Vmxnet3_TxDesc),
+ &tq->tx_ring.basePA);
+ if (!tq->tx_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate tx ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
+ tq->data_ring.size *
+ sizeof(struct Vmxnet3_TxDataDesc),
+ &tq->data_ring.basePA);
+ if (!tq->data_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate data ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
+ tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc),
+ &tq->comp_ring.basePA);
+ if (!tq->comp_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
+ GFP_KERNEL);
+ if (!tq->buf_info) {
+ printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ vmxnet3_tq_destroy(tq, adapter);
+ return -ENOMEM;
+}
+
+
+/*
+ * starting from ring->next2fill, allocate rx buffers for the given ring
+ * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
+ * are allocated or allocation fails
+ */
+
+static int
+vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
+ int num_to_alloc, struct vmxnet3_adapter *adapter)
+{
+ int num_allocated = 0;
+ struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
+ struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
+ u32 val;
+
+ while (num_allocated < num_to_alloc) {
+ struct vmxnet3_rx_buf_info *rbi;
+ union Vmxnet3_GenericDesc *gd;
+
+ rbi = rbi_base + ring->next2fill;
+ gd = ring->base + ring->next2fill;
+
+ if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
+ if (rbi->skb == NULL) {
+ rbi->skb = dev_alloc_skb(rbi->len +
+ NET_IP_ALIGN);
+ if (unlikely(rbi->skb == NULL)) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->skb->dev = adapter->netdev;
+
+ skb_reserve(rbi->skb, NET_IP_ALIGN);
+ rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ /* rx buffer skipped by the device */
+ }
+ val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
+ } else {
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
+ rbi->len != PAGE_SIZE);
+
+ if (rbi->page == NULL) {
+ rbi->page = alloc_page(GFP_ATOMIC);
+ if (unlikely(rbi->page == NULL)) {
+ rq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+ rbi->dma_addr = pci_map_page(adapter->pdev,
+ rbi->page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ } else {
+ /* rx buffers skipped by the device */
+ }
+ val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
+ }
+
+ BUG_ON(rbi->dma_addr == 0);
+ gd->rxd.addr = rbi->dma_addr;
+ gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
+ rbi->len;
+
+ num_allocated++;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ }
+ rq->uncommitted[ring_idx] += num_allocated;
+
+ dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+ "%u, uncommited %u\n", num_allocated, ring->next2fill,
+ ring->next2comp, rq->uncommitted[ring_idx]);
+
+	/* so that the device can distinguish a full ring from an empty ring */
+ BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
+
+ return num_allocated;
+}
+
+
+static void
+vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_buf_info *rbi)
+{
+ struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
+ skb_shinfo(skb)->nr_frags;
+
+ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+
+ frag->page = rbi->page;
+ frag->page_offset = 0;
+ frag->size = rcd->len;
+ skb->data_len += frag->size;
+ skb_shinfo(skb)->nr_frags++;
+}
+
+
+static void
+vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
+ struct vmxnet3_adapter *adapter)
+{
+ u32 dw2, len;
+ unsigned long buf_offset;
+ int i;
+ union Vmxnet3_GenericDesc *gdesc;
+ struct vmxnet3_tx_buf_info *tbi = NULL;
+
+ BUG_ON(ctx->copy_size > skb_headlen(skb));
+
+ /* use the previous gen bit for the SOP desc */
+ dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
+
+ ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
+ gdesc = ctx->sop_txd; /* both loops below can be skipped */
+
+ /* no need to map the buffer if headers are copied */
+ if (ctx->copy_size) {
+ ctx->sop_txd->txd.addr = tq->data_ring.basePA +
+ tq->tx_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc);
+ ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
+ ctx->sop_txd->dword[3] = 0;
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_NONE;
+
+ dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
+ ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+
+ /* use the right gen for non-SOP desc */
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ }
+
+ /* linear part can use multiple tx desc if it's big */
+ len = skb_headlen(skb) - ctx->copy_size;
+ buf_offset = ctx->copy_size;
+ while (len) {
+ u32 buf_size;
+
+ buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
+ VMXNET3_MAX_TX_BUF_SIZE : len;
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_SINGLE;
+ tbi->dma_addr = pci_map_single(adapter->pdev,
+ skb->data + buf_offset, buf_size,
+ PCI_DMA_TODEVICE);
+
+		tbi->len = buf_size; /* this automatically converts 2^14 to 0 */
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = tbi->dma_addr;
+ gdesc->dword[2] = dw2 | buf_size;
+ gdesc->dword[3] = 0;
+
+ dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+ tq->tx_ring.next2fill, gdesc->txd.addr,
+ gdesc->dword[2], gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+ len -= buf_size;
+ buf_offset += buf_size;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+ tbi = tq->buf_info + tq->tx_ring.next2fill;
+ tbi->map_type = VMXNET3_MAP_PAGE;
+ tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
+ frag->page_offset, frag->size,
+ PCI_DMA_TODEVICE);
+
+ tbi->len = frag->size;
+
+ gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+ BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+
+ gdesc->txd.addr = tbi->dma_addr;
+ gdesc->dword[2] = dw2 | frag->size;
+ gdesc->dword[3] = 0;
+
+ dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+ tq->tx_ring.next2fill, gdesc->txd.addr,
+ gdesc->dword[2], gdesc->dword[3]);
+ vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+ dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+ }
+
+ ctx->eop_txd = gdesc;
+
+ /* set the last buf_info for the pkt */
+ tbi->skb = skb;
+ tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+}
+
+
+/*
+ * parse and copy relevant protocol headers:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ * Returns:
+ * -1: error happens during parsing
+ * 0: protocol headers parsed, but too big to be copied
+ * 1: protocol headers parsed and copied
+ *
+ * Other effects:
+ * 1. related *ctx fields are updated.
+ * 2. ctx->copy_size is # of bytes copied
+ * 3. the portion copied is guaranteed to be in the linear part
+ *
+ */
+static int
+vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_tx_ctx *ctx,
+ struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_TxDataDesc *tdd;
+
+ if (ctx->mss) {
+ ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+ ctx->l4_hdr_size = ((struct tcphdr *)
+ skb_transport_header(skb))->doff * 4;
+ ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
+ } else {
+ unsigned int pull_size;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ctx->eth_ip_hdr_size = skb_transport_offset(skb);
+
+ if (ctx->ipv4) {
+ struct iphdr *iph = (struct iphdr *)
+ skb_network_header(skb);
+ if (iph->protocol == IPPROTO_TCP) {
+ pull_size = ctx->eth_ip_hdr_size +
+ sizeof(struct tcphdr);
+
+ if (unlikely(!pskb_may_pull(skb,
+ pull_size))) {
+ goto err;
+ }
+ ctx->l4_hdr_size = ((struct tcphdr *)
+ skb_transport_header(skb))->doff * 4;
+ } else if (iph->protocol == IPPROTO_UDP) {
+ ctx->l4_hdr_size =
+ sizeof(struct udphdr);
+ } else {
+ ctx->l4_hdr_size = 0;
+ }
+ } else {
+ /* for simplicity, don't copy L4 headers */
+ ctx->l4_hdr_size = 0;
+ }
+ ctx->copy_size = ctx->eth_ip_hdr_size +
+ ctx->l4_hdr_size;
+ } else {
+ ctx->eth_ip_hdr_size = 0;
+ ctx->l4_hdr_size = 0;
+ /* copy as much as allowed */
+			ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
+					     skb_headlen(skb));
+ }
+
+ /* make sure headers are accessible directly */
+ if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
+ goto err;
+ }
+
+ if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
+ tq->stats.oversized_hdr++;
+ ctx->copy_size = 0;
+ return 0;
+ }
+
+ tdd = tq->data_ring.base + tq->tx_ring.next2fill;
+
+ memcpy(tdd->data, skb->data, ctx->copy_size);
+ dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+ ctx->copy_size, tq->tx_ring.next2fill);
+ return 1;
+
+err:
+ return -1;
+}
+
+
+static void
+vmxnet3_prepare_tso(struct sk_buff *skb,
+ struct vmxnet3_tx_ctx *ctx)
+{
+ struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
+ if (ctx->ipv4) {
+ struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
+ tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ }
+}
+
+
+/*
+ * Transmits a pkt thru a given tq
+ * Returns:
+ * NETDEV_TX_OK: descriptors are setup successfully
+ * NETDEV_TX_OK: error occurred, the pkt is dropped
+ * NETDEV_TX_BUSY: tx ring is full, queue is stopped
+ *
+ * Side-effects:
+ * 1. tx ring may be changed
+ * 2. tq stats may be updated accordingly
+ * 3. shared->txNumDeferred may be updated
+ */
+
+static int
+vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter, struct net_device *netdev)
+{
+ int ret;
+ u32 count;
+ unsigned long flags;
+ struct vmxnet3_tx_ctx ctx;
+ union Vmxnet3_GenericDesc *gdesc;
+
+ /* conservatively estimate # of descriptors to use */
+ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
+ skb_shinfo(skb)->nr_frags + 1;
+
+ ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
+
+ ctx.mss = skb_shinfo(skb)->gso_size;
+ if (ctx.mss) {
+ if (skb_header_cloned(skb)) {
+ if (unlikely(pskb_expand_head(skb, 0, 0,
+ GFP_ATOMIC) != 0)) {
+ tq->stats.drop_tso++;
+ goto drop_pkt;
+ }
+ tq->stats.copy_skb_header++;
+ }
+ vmxnet3_prepare_tso(skb, &ctx);
+ } else {
+ if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
+
+ /* non-tso pkts must not use more than
+ * VMXNET3_MAX_TXD_PER_PKT entries
+ */
+ if (skb_linearize(skb) != 0) {
+ tq->stats.drop_too_many_frags++;
+ goto drop_pkt;
+ }
+ tq->stats.linearized++;
+
+ /* recalculate the # of descriptors to use */
+ count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+ }
+ }
+
+ ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
+ if (ret >= 0) {
+ BUG_ON(ret <= 0 && ctx.copy_size != 0);
+ /* hdrs parsed, check against other limits */
+ if (ctx.mss) {
+ if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
+ VMXNET3_MAX_TX_BUF_SIZE)) {
+ goto hdr_too_big;
+ }
+ } else {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (unlikely(ctx.eth_ip_hdr_size +
+ skb->csum_offset >
+ VMXNET3_MAX_CSUM_OFFSET)) {
+ goto hdr_too_big;
+ }
+ }
+ }
+ } else {
+ tq->stats.drop_hdr_inspect_err++;
+ goto drop_pkt;
+ }
+
+ spin_lock_irqsave(&tq->tx_lock, flags);
+
+ if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+ tq->stats.tx_ring_full++;
+ dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+ " next2fill %u\n", adapter->netdev->name,
+ tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+ vmxnet3_tq_stop(tq, adapter);
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* fill tx descs related to addr & len */
+ vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
+
+ /* setup the EOP desc */
+ ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;
+
+ /* setup the SOP desc */
+ gdesc = ctx.sop_txd;
+ if (ctx.mss) {
+ gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
+ gdesc->txd.om = VMXNET3_OM_TSO;
+ gdesc->txd.msscof = ctx.mss;
+ tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
+ ctx.mss - 1) / ctx.mss;
+ } else {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ gdesc->txd.hlen = ctx.eth_ip_hdr_size;
+ gdesc->txd.om = VMXNET3_OM_CSUM;
+ gdesc->txd.msscof = ctx.eth_ip_hdr_size +
+ skb->csum_offset;
+ } else {
+ gdesc->txd.om = 0;
+ gdesc->txd.msscof = 0;
+ }
+ tq->shared->txNumDeferred++;
+ }
+
+ if (vlan_tx_tag_present(skb)) {
+ gdesc->txd.ti = 1;
+ gdesc->txd.tci = vlan_tx_tag_get(skb);
+ }
+
+ wmb();
+
+ /* finally flips the GEN bit of the SOP desc */
+ gdesc->dword[2] ^= VMXNET3_TXD_GEN;
+ dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+ (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
+ tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
+ gdesc->dword[3]);
+
+ spin_unlock_irqrestore(&tq->tx_lock, flags);
+
+ if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
+ tq->shared->txNumDeferred = 0;
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
+ tq->tx_ring.next2fill);
+ }
+ netdev->trans_start = jiffies;
+
+ return NETDEV_TX_OK;
+
+hdr_too_big:
+ tq->stats.drop_oversized_hdr++;
+drop_pkt:
+ tq->stats.drop_total++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+
+static netdev_tx_t
+vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
+
+ return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
+}
+
+
+static void
+vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
+ struct sk_buff *skb,
+ union Vmxnet3_GenericDesc *gdesc)
+{
+ if (!gdesc->rcd.cnc && adapter->rxcsum) {
+ /* typical case: TCP/UDP over IP and both csums are correct */
+ if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
+ VMXNET3_RCD_CSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+ BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
+ BUG_ON(gdesc->rcd.frg);
+ } else {
+ if (gdesc->rcd.csum) {
+ skb->csum = htons(gdesc->rcd.csum);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ }
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+}
+
+
+static void
+vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
+ struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
+{
+ rq->stats.drop_err++;
+ if (!rcd->fcs)
+ rq->stats.drop_fcs++;
+
+ rq->stats.drop_total++;
+
+ /*
+ * We do not unmap and chain the rx buffer to the skb.
+ * We basically pretend this buffer is not used and will be recycled
+ * by vmxnet3_rq_alloc_rx_buf()
+ */
+
+ /*
+ * ctx->skb may be NULL if this is the first and the only one
+ * desc for the pkt
+ */
+ if (ctx->skb)
+ dev_kfree_skb_irq(ctx->skb);
+
+ ctx->skb = NULL;
+}
+
+
+static int
+vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter, int quota)
+{
+ static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
+ u32 num_rxd = 0;
+ struct Vmxnet3_RxCompDesc *rcd;
+ struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
+
+ rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+ while (rcd->gen == rq->comp_ring.gen) {
+ struct vmxnet3_rx_buf_info *rbi;
+ struct sk_buff *skb;
+ int num_to_alloc;
+ struct Vmxnet3_RxDesc *rxd;
+ u32 idx, ring_idx;
+
+ if (num_rxd >= quota) {
+ /* we may stop even before we see the EOP desc of
+ * the current pkt
+ */
+ break;
+ }
+ num_rxd++;
+
+ idx = rcd->rxdIdx;
+ ring_idx = rcd->rqID == rq->qid ? 0 : 1;
+
+ rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
+ rbi = rq->buf_info[ring_idx] + idx;
+
+ BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);
+
+ if (unlikely(rcd->eop && rcd->err)) {
+ vmxnet3_rx_error(rq, rcd, ctx, adapter);
+ goto rcd_done;
+ }
+
+ if (rcd->sop) { /* first buf of the pkt */
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
+ rcd->rqID != rq->qid);
+
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
+ BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
+
+ if (unlikely(rcd->len == 0)) {
+ /* Pretend the rx buffer is skipped. */
+ BUG_ON(!(rcd->sop && rcd->eop));
+ dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+ ring_idx, idx);
+ goto rcd_done;
+ }
+
+ ctx->skb = rbi->skb;
+ rbi->skb = NULL;
+
+ pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ skb_put(ctx->skb, rcd->len);
+ } else {
+ BUG_ON(ctx->skb == NULL);
+			/* non-SOP buffers must be type 1 in most cases */
+ if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+
+ if (rcd->len) {
+ pci_unmap_page(adapter->pdev,
+ rbi->dma_addr, rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ vmxnet3_append_frag(ctx->skb, rcd, rbi);
+ rbi->page = NULL;
+ }
+ } else {
+ /*
+ * The only time a non-SOP buffer is type 0 is
+ * when it's EOP and error flag is raised, which
+ * has already been handled.
+ */
+ BUG_ON(true);
+ }
+ }
+
+ skb = ctx->skb;
+ if (rcd->eop) {
+ skb->len += skb->data_len;
+ skb->truesize += skb->data_len;
+
+ vmxnet3_rx_csum(adapter, skb,
+ (union Vmxnet3_GenericDesc *)rcd);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ if (unlikely(adapter->vlan_grp && rcd->ts)) {
+ vlan_hwaccel_receive_skb(skb,
+ adapter->vlan_grp, rcd->tci);
+ } else {
+ netif_receive_skb(skb);
+ }
+
+ adapter->netdev->last_rx = jiffies;
+ ctx->skb = NULL;
+ }
+
+rcd_done:
+ /* device may skip some rx descs */
+ rq->rx_ring[ring_idx].next2comp = idx;
+ VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
+ rq->rx_ring[ring_idx].size);
+
+ /* refill rx buffers frequently to avoid starving the h/w */
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
+ ring_idx);
+ if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
+ ring_idx, adapter))) {
+ vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
+ adapter);
+
+ /* if needed, update the register */
+ if (unlikely(rq->shared->updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ rq->rx_ring[ring_idx].next2fill);
+ rq->uncommitted[ring_idx] = 0;
+ }
+ }
+
+ vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
+ rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
+ }
+
+ return num_rxd;
+}
+
+
+static void
+vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ u32 i, ring_idx;
+ struct Vmxnet3_RxDesc *rxd;
+
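+	/* free any skbs/pages still attached to either ring and reset the
+	 * rings to their initial state
+	 */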
+ for (ring_idx = 0; ring_idx < 2; ring_idx++) {
+ for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
+ rxd = &rq->rx_ring[ring_idx].base[i].rxd;
+
+ if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
+ rq->buf_info[ring_idx][i].skb) {
+ pci_unmap_single(adapter->pdev, rxd->addr,
+ rxd->len, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
+ rq->buf_info[ring_idx][i].skb = NULL;
+ } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
+ rq->buf_info[ring_idx][i].page) {
+ pci_unmap_page(adapter->pdev, rxd->addr,
+ rxd->len, PCI_DMA_FROMDEVICE);
+ put_page(rq->buf_info[ring_idx][i].page);
+ rq->buf_info[ring_idx][i].page = NULL;
+ }
+ }
+
+ rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
+ rq->rx_ring[ring_idx].next2fill =
+ rq->rx_ring[ring_idx].next2comp = 0;
+ rq->uncommitted[ring_idx] = 0;
+ }
+
+ rq->comp_ring.gen = VMXNET3_INIT_GEN;
+ rq->comp_ring.next2proc = 0;
+}
+
+
+void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+ int j;
+
+ /* all rx buffers must have already been freed */
+ for (i = 0; i < 2; i++) {
+ if (rq->buf_info[i]) {
+ for (j = 0; j < rq->rx_ring[i].size; j++)
+ BUG_ON(rq->buf_info[i][j].page != NULL);
+ }
+ }
+
+
+ kfree(rq->buf_info[0]);
+
+ for (i = 0; i < 2; i++) {
+ if (rq->rx_ring[i].base) {
+ pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
+ * sizeof(struct Vmxnet3_RxDesc),
+ rq->rx_ring[i].base,
+ rq->rx_ring[i].basePA);
+ rq->rx_ring[i].base = NULL;
+ }
+ rq->buf_info[i] = NULL;
+ }
+
+ if (rq->comp_ring.base) {
+ pci_free_consistent(adapter->pdev, rq->comp_ring.size *
+ sizeof(struct Vmxnet3_RxCompDesc),
+ rq->comp_ring.base, rq->comp_ring.basePA);
+ rq->comp_ring.base = NULL;
+ }
+}
+
+
+static int
+vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter)
+{
+ int i;
+
+ /* initialize buf_info */
+ for (i = 0; i < rq->rx_ring[0].size; i++) {
+
+ /* 1st buf for a pkt is skbuff */
+ if (i % adapter->rx_buf_per_pkt == 0) {
+ rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
+ rq->buf_info[0][i].len = adapter->skb_buf_size;
+		} else { /* subsequent bufs for a pkt are frags */
+ rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
+ rq->buf_info[0][i].len = PAGE_SIZE;
+ }
+ }
+ for (i = 0; i < rq->rx_ring[1].size; i++) {
+ rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
+ rq->buf_info[1][i].len = PAGE_SIZE;
+ }
+
+ /* reset internal state and allocate buffers for both rings */
+ for (i = 0; i < 2; i++) {
+ rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
+ rq->uncommitted[i] = 0;
+
+ memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
+ sizeof(struct Vmxnet3_RxDesc));
+ rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
+ }
+ if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
+ adapter) == 0) {
+		/* the 1st ring must have at least 1 rx buffer */
+ return -ENOMEM;
+ }
+ vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
+
+ /* reset the comp ring */
+ rq->comp_ring.next2proc = 0;
+ memset(rq->comp_ring.base, 0, rq->comp_ring.size *
+ sizeof(struct Vmxnet3_RxCompDesc));
+ rq->comp_ring.gen = VMXNET3_INIT_GEN;
+
+ /* reset rxctx */
+ rq->rx_ctx.skb = NULL;
+
+ /* stats are not reset */
+ return 0;
+}
+
+
+static int
+vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
+{
+ int i;
+ size_t sz;
+ struct vmxnet3_rx_buf_info *bi;
+
+ for (i = 0; i < 2; i++) {
+
+ sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
+ rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
+ &rq->rx_ring[i].basePA);
+ if (!rq->rx_ring[i].base) {
+ printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
+ adapter->netdev->name, i);
+ goto err;
+ }
+ }
+
+ sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
+ rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
+ &rq->comp_ring.basePA);
+ if (!rq->comp_ring.base) {
+ printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
+ adapter->netdev->name);
+ goto err;
+ }
+
+ sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
+ rq->rx_ring[1].size);
+	bi = kzalloc(sz, GFP_KERNEL);
+	if (!bi) {
+		printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
+		       adapter->netdev->name);
+		goto err;
+	}
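+	/* a single allocation backs buf_info for both rings; ring 1's
+	 * entries start right after ring 0's
+	 */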
+ rq->buf_info[0] = bi;
+ rq->buf_info[1] = bi + rq->rx_ring[0].size;
+
+ return 0;
+
+err:
+ vmxnet3_rq_destroy(rq, adapter);
+ return -ENOMEM;
+}
+
+
+static int
+vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
+{
+ if (unlikely(adapter->shared->ecr))
+ vmxnet3_process_events(adapter);
+
+ vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
+ return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
+}
+
+
+static int
+vmxnet3_poll(struct napi_struct *napi, int budget)
+{
+ struct vmxnet3_adapter *adapter = container_of(napi,
+ struct vmxnet3_adapter, napi);
+ int rxd_done;
+
+ rxd_done = vmxnet3_do_poll(adapter, budget);
+
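+	/* all work done within budget: leave polling and re-arm the intr */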
+ if (rxd_done < budget) {
+ napi_complete(napi);
+ vmxnet3_enable_intr(adapter, 0);
+ }
+ return rxd_done;
+}
+
+
+/* Interrupt handler for vmxnet3 */
+static irqreturn_t
+vmxnet3_intr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+ if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
+ u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
+ if (unlikely(icr == 0))
+ /* not ours */
+ return IRQ_NONE;
+ }
+
+
+ /* disable intr if needed */
+ if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+ vmxnet3_disable_intr(adapter, 0);
+
+ napi_schedule(&adapter->napi);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+
+/* netpoll callback. */
+static void
+vmxnet3_netpoll(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int irq;
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->intr.type == VMXNET3_IT_MSIX)
+ irq = adapter->intr.msix_entries[0].vector;
+ else
+#endif
+ irq = adapter->pdev->irq;
+
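+	/* run the handler with the irq disabled so it cannot race with
+	 * the real interrupt
+	 */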
+ disable_irq(irq);
+ vmxnet3_intr(irq, netdev);
+ enable_irq(irq);
+}
+#endif
+
+static int
+vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
+{
+ int err;
+
+#ifdef CONFIG_PCI_MSI
+ if (adapter->intr.type == VMXNET3_IT_MSIX) {
+ /* we only use 1 MSI-X vector */
+ err = request_irq(adapter->intr.msix_entries[0].vector,
+ vmxnet3_intr, 0, adapter->netdev->name,
+ adapter->netdev);
+ } else
+#endif
+ if (adapter->intr.type == VMXNET3_IT_MSI) {
+ err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
+ adapter->netdev->name, adapter->netdev);
+ } else {
+ err = request_irq(adapter->pdev->irq, vmxnet3_intr,
+ IRQF_SHARED, adapter->netdev->name,
+ adapter->netdev);
+ }
+
+ if (err)
+ printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
+ ":%d\n", adapter->netdev->name, adapter->intr.type, err);
+
+
+ if (!err) {
+ int i;
+ /* init our intr settings */
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
+
+		/* next, set up the intr index for all intr sources */
+ adapter->tx_queue.comp_ring.intr_idx = 0;
+ adapter->rx_queue.comp_ring.intr_idx = 0;
+ adapter->intr.event_intr_idx = 0;
+
+ printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
+ "allocated\n", adapter->netdev->name, adapter->intr.type,
+ adapter->intr.mask_mode, adapter->intr.num_intrs);
+ }
+
+ return err;
+}
+
+
+static void
+vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
+{
+ BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
+ adapter->intr.num_intrs <= 0);
+
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX:
+ {
+ int i;
+
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ free_irq(adapter->intr.msix_entries[i].vector,
+ adapter->netdev);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ free_irq(adapter->pdev->irq, adapter->netdev);
+ break;
+ case VMXNET3_IT_INTX:
+ free_irq(adapter->pdev->irq, adapter->netdev);
+ break;
+ default:
+ BUG_ON(true);
+ }
+}
+
+
+static void
+vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+
+ if (grp) {
+ /* add vlan rx stripping. */
+ if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
+ int i;
+ struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ adapter->vlan_grp = grp;
+
+ /* update FEATURES to device */
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ /*
+ * Clear entire vfTable; then enable untagged pkts.
+ * Note: setting one entry in vfTable to non-zero turns
+ * on VLAN rx filtering.
+ */
+ for (i = 0; i < VMXNET3_VFT_SIZE; i++)
+ vfTable[i] = 0;
+
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+ } else {
+ printk(KERN_ERR "%s: vlan_rx_register when device has "
+ "no NETIF_F_HW_VLAN_RX\n", netdev->name);
+ }
+ } else {
+ /* remove vlan rx stripping. */
+ struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ adapter->vlan_grp = NULL;
+
+ if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
+ int i;
+
+ for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
+ /* clear entire vfTable; this also disables
+ * VLAN rx filtering
+ */
+ vfTable[i] = 0;
+ }
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+
+ /* update FEATURES to device */
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ }
+ }
+}
+
+
+static void
+vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
+{
+ if (adapter->vlan_grp) {
+ u16 vid;
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+ bool activeVlan = false;
+
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (vlan_group_get_device(adapter->vlan_grp, vid)) {
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ activeVlan = true;
+ }
+ }
+ if (activeVlan) {
+ /* continue to allow untagged pkts */
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+ }
+ }
+}
+
+
+static void
+vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+
+ VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+
+static void
+vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+
+ VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+}
+
+
+static u8 *
+vmxnet3_copy_mc(struct net_device *netdev)
+{
+ u8 *buf = NULL;
+ u32 sz = netdev->mc_count * ETH_ALEN;
+
+ /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
+ if (sz <= 0xffff) {
+ /* We may be called with BH disabled */
+ buf = kmalloc(sz, GFP_ATOMIC);
+ if (buf) {
+ int i;
+ struct dev_mc_list *mc = netdev->mc_list;
+
+ for (i = 0; i < netdev->mc_count; i++) {
+ BUG_ON(!mc);
+ memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
+ ETH_ALEN);
+ mc = mc->next;
+ }
+ }
+ }
+ return buf;
+}
+
+
+static void
+vmxnet3_set_mc(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_RxFilterConf *rxConf =
+ &adapter->shared->devRead.rxFilterConf;
+ u8 *new_table = NULL;
+ u32 new_mode = VMXNET3_RXM_UCAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ new_mode |= VMXNET3_RXM_PROMISC;
+
+ if (netdev->flags & IFF_BROADCAST)
+ new_mode |= VMXNET3_RXM_BCAST;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ new_mode |= VMXNET3_RXM_ALL_MULTI;
+ else
+ if (netdev->mc_count > 0) {
+ new_table = vmxnet3_copy_mc(netdev);
+ if (new_table) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ rxConf->mfTableLen = netdev->mc_count *
+ ETH_ALEN;
+ rxConf->mfTablePA = virt_to_phys(new_table);
+ } else {
+ printk(KERN_INFO "%s: failed to copy mcast list"
+ ", setting ALL_MULTI\n", netdev->name);
+ new_mode |= VMXNET3_RXM_ALL_MULTI;
+ }
+ }
+
+
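+	/* no multicast table in use: clear the table pointer and length */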
+ if (!(new_mode & VMXNET3_RXM_MCAST)) {
+ rxConf->mfTableLen = 0;
+ rxConf->mfTablePA = 0;
+ }
+
+ if (new_mode != rxConf->rxMode) {
+ rxConf->rxMode = new_mode;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_RX_MODE);
+ }
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_MAC_FILTERS);
+
+ kfree(new_table);
+}
+
+
+/*
+ * Set up driver_shared based on settings in adapter.
+ */
+
+static void
+vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
+{
+ struct Vmxnet3_DriverShared *shared = adapter->shared;
+ struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ struct Vmxnet3_TxQueueConf *tqc;
+ struct Vmxnet3_RxQueueConf *rqc;
+ int i;
+
+ memset(shared, 0, sizeof(*shared));
+
+ /* driver settings */
+ shared->magic = VMXNET3_REV1_MAGIC;
+ devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
+ devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
+ devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
+ devRead->misc.driverInfo.vmxnet3RevSpt = 1;
+ devRead->misc.driverInfo.uptVerSpt = 1;
+
+ devRead->misc.ddPA = virt_to_phys(adapter);
+ devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
+
+ /* set up feature flags */
+ if (adapter->rxcsum)
+ devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
+
+ if (adapter->lro) {
+ devRead->misc.uptFeatures |= UPT1_F_LRO;
+ devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
+ }
+ if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
+ && adapter->vlan_grp) {
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
+ }
+
+ devRead->misc.mtu = adapter->netdev->mtu;
+ devRead->misc.queueDescPA = adapter->queue_desc_pa;
+ devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
+ sizeof(struct Vmxnet3_RxQueueDesc);
+
+ /* tx queue settings */
+ BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
+
+ devRead->misc.numTxQueues = 1;
+ tqc = &adapter->tqd_start->conf;
+ tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA;
+ tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
+ tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
+ tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info);
+ tqc->txRingSize = adapter->tx_queue.tx_ring.size;
+ tqc->dataRingSize = adapter->tx_queue.data_ring.size;
+ tqc->compRingSize = adapter->tx_queue.comp_ring.size;
+ tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) *
+ tqc->txRingSize;
+ tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
+
+ /* rx queue settings */
+ devRead->misc.numRxQueues = 1;
+ rqc = &adapter->rqd_start->conf;
+ rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
+ rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
+ rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA;
+ rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info);
+ rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size;
+ rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size;
+ rqc->compRingSize = adapter->rx_queue.comp_ring.size;
+ rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) *
+ (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
+ rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
+
+ /* intr settings */
+ devRead->intrConf.autoMask = adapter->intr.mask_mode ==
+ VMXNET3_IMM_AUTO;
+ devRead->intrConf.numIntrs = adapter->intr.num_intrs;
+ for (i = 0; i < adapter->intr.num_intrs; i++)
+ devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
+
+ devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
+
+ /* rx filter settings */
+ devRead->rxFilterConf.rxMode = 0;
+ vmxnet3_restore_vlan(adapter);
+ /* the rest are already zeroed */
+}
+
+
+int
+vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
+{
+ int err;
+ u32 ret;
+
+ dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+ " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
+ adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
+ adapter->rx_queue.rx_ring[0].size,
+ adapter->rx_queue.rx_ring[1].size);
+
+ vmxnet3_tq_init(&adapter->tx_queue, adapter);
+ err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
+ if (err) {
+ printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
+ adapter->netdev->name, err);
+ goto rq_err;
+ }
+
+ err = vmxnet3_request_irqs(adapter);
+ if (err) {
+ printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
+ adapter->netdev->name, err);
+ goto irq_err;
+ }
+
+ vmxnet3_setup_driver_shared(adapter);
+
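+	/* tell the device where the shared area lives */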
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
+ VMXNET3_GET_ADDR_LO(adapter->shared_pa));
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
+ VMXNET3_GET_ADDR_HI(adapter->shared_pa));
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_ACTIVATE_DEV);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to activate dev %s: error %u\n",
+ adapter->netdev->name, ret);
+ err = -EINVAL;
+ goto activate_err;
+ }
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
+ adapter->rx_queue.rx_ring[0].next2fill);
+ VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
+ adapter->rx_queue.rx_ring[1].next2fill);
+
+	/* Apply the rx filter settings last. */
+ vmxnet3_set_mc(adapter->netdev);
+
+ /*
+ * Check link state when first activating device. It will start the
+ * tx queue if the link is up.
+ */
+ vmxnet3_check_link(adapter);
+
+ napi_enable(&adapter->napi);
+ vmxnet3_enable_all_intrs(adapter);
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+ return 0;
+
+activate_err:
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
+ vmxnet3_free_irqs(adapter);
+irq_err:
+rq_err:
+ /* free up buffers we allocated */
+ vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ return err;
+}
+
+
+void
+vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
+{
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+}
+
+
+int
+vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
+{
+ if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
+ return 0;
+
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_QUIESCE_DEV);
+ vmxnet3_disable_all_intrs(adapter);
+
+ napi_disable(&adapter->napi);
+ netif_tx_disable(adapter->netdev);
+ adapter->link_speed = 0;
+ netif_carrier_off(adapter->netdev);
+
+ vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
+ vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+ vmxnet3_free_irqs(adapter);
+ return 0;
+}
+
+
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
+{
+ u32 tmp;
+
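+	/* MACL holds the lower 4 bytes of the MAC, MACH the upper 2 */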
+ tmp = *(u32 *)mac;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
+
+ tmp = (mac[5] << 8) | mac[4];
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
+}
+
+
+static int
+vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ vmxnet3_write_mac_addr(adapter, addr->sa_data);
+
+ return 0;
+}
+
+
+/* ==================== initialization and cleanup routines ============ */
+
+static int
+vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+{
+ int err;
+ unsigned long mmio_start, mmio_len;
+ struct pci_dev *pdev = adapter->pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
+ pci_name(pdev), err);
+ return err;
+ }
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ printk(KERN_ERR "pci_set_consistent_dma_mask failed "
+ "for adapter %s\n", pci_name(pdev));
+ err = -EIO;
+ goto err_set_mask;
+ }
+ *dma64 = true;
+ } else {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ printk(KERN_ERR "pci_set_dma_mask failed for adapter "
+ "%s\n", pci_name(pdev));
+ err = -EIO;
+ goto err_set_mask;
+ }
+ *dma64 = false;
+ }
+
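+	/* request BAR0 and BAR1 only, i.e. bitmask (1 << 2) - 1 */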
+ err = pci_request_selected_regions(pdev, (1 << 2) - 1,
+ vmxnet3_driver_name);
+ if (err) {
+ printk(KERN_ERR "Failed to request region for adapter %s: "
+ "error %d\n", pci_name(pdev), err);
+ goto err_set_mask;
+ }
+
+ pci_set_master(pdev);
+
+ mmio_start = pci_resource_start(pdev, 0);
+ mmio_len = pci_resource_len(pdev, 0);
+ adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw_addr0) {
+ printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ mmio_start = pci_resource_start(pdev, 1);
+ mmio_len = pci_resource_len(pdev, 1);
+ adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
+ if (!adapter->hw_addr1) {
+ printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
+ pci_name(pdev));
+ err = -EIO;
+ goto err_bar1;
+ }
+ return 0;
+
+err_bar1:
+ iounmap(adapter->hw_addr0);
+err_ioremap:
+ pci_release_selected_regions(pdev, (1 << 2) - 1);
+err_set_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+
+static void
+vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
+{
+ BUG_ON(!adapter->pdev);
+
+ iounmap(adapter->hw_addr0);
+ iounmap(adapter->hw_addr1);
+ pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
+ pci_disable_device(adapter->pdev);
+}
+
+
+static void
+vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
+{
+ size_t sz;
+
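+	/*
+	 * If the MTU fits in a single skb buffer, one rx buffer per pkt is
+	 * enough; otherwise the first buffer is an skb and the rest of the
+	 * pkt spills into page-sized body buffers.
+	 */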
+ if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
+ VMXNET3_MAX_ETH_HDR_SIZE) {
+ adapter->skb_buf_size = adapter->netdev->mtu +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
+ adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
+
+ adapter->rx_buf_per_pkt = 1;
+ } else {
+ adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
+ sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
+ VMXNET3_MAX_ETH_HDR_SIZE;
+ adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
+ }
+
+ /*
+ * for simplicity, force the ring0 size to be a multiple of
+ * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
+ */
+ sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
+ adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
+ sz - 1) / sz * sz;
+ adapter->rx_queue.rx_ring[0].size = min_t(u32,
+ adapter->rx_queue.rx_ring[0].size,
+ VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+}
+
+
+int
+vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
+ u32 rx_ring_size, u32 rx_ring2_size)
+{
+ int err;
+
+ adapter->tx_queue.tx_ring.size = tx_ring_size;
+ adapter->tx_queue.data_ring.size = tx_ring_size;
+ adapter->tx_queue.comp_ring.size = tx_ring_size;
+ adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
+ adapter->tx_queue.stopped = true;
+ err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
+ if (err)
+ return err;
+
+ adapter->rx_queue.rx_ring[0].size = rx_ring_size;
+ adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
+ vmxnet3_adjust_rx_ring_size(adapter);
+ adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size +
+ adapter->rx_queue.rx_ring[1].size;
+ adapter->rx_queue.qid = 0;
+ adapter->rx_queue.qid2 = 1;
+ adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
+ err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
+ if (err)
+ vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+
+ return err;
+}
+
+static int
+vmxnet3_open(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter;
+ int err;
+
+ adapter = netdev_priv(netdev);
+
+ spin_lock_init(&adapter->tx_queue.tx_lock);
+
+ err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
+ VMXNET3_DEF_RX_RING_SIZE,
+ VMXNET3_DEF_RX_RING_SIZE);
+ if (err)
+ goto queue_err;
+
+ err = vmxnet3_activate_dev(adapter);
+ if (err)
+ goto activate_err;
+
+ return 0;
+
+activate_err:
+ vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+queue_err:
+ return err;
+}
+
+
+static int
+vmxnet3_close(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ msleep(1);
+
+ vmxnet3_quiesce_dev(adapter);
+
+ vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+
+
+ return 0;
+}
+
+
+void
+vmxnet3_force_close(struct vmxnet3_adapter *adapter)
+{
+	/*
+	 * VMXNET3_STATE_BIT_RESETTING must already be cleared by the
+	 * caller, otherwise vmxnet3_close() will deadlock.
+	 */
+ BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
+
+ /* we need to enable NAPI, otherwise dev_close will deadlock */
+ napi_enable(&adapter->napi);
+ dev_close(adapter->netdev);
+}
+
+
+static int
+vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
+ return -EINVAL;
+
+ if (new_mtu > 1500 && !adapter->jumbo_frame)
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (netif_running(netdev)) {
+ vmxnet3_quiesce_dev(adapter);
+ vmxnet3_reset_dev(adapter);
+
+ /* we need to re-create the rx queue based on the new mtu */
+ vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+ vmxnet3_adjust_rx_ring_size(adapter);
+ adapter->rx_queue.comp_ring.size =
+ adapter->rx_queue.rx_ring[0].size +
+ adapter->rx_queue.rx_ring[1].size;
+ err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
+ if (err) {
+ printk(KERN_ERR "%s: failed to re-create rx queue,"
+ " error %d. Closing it.\n", netdev->name, err);
+ goto out;
+ }
+
+ err = vmxnet3_activate_dev(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: failed to re-activate, error %d. "
+ "Closing it\n", netdev->name, err);
+ goto out;
+ }
+ }
+
+out:
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+ if (err)
+ vmxnet3_force_close(adapter);
+
+ return err;
+}
+
+
+static void
+vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_LRO;
+
+ printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
+
+ adapter->rxcsum = true;
+ adapter->jumbo_frame = true;
+ adapter->lro = true;
+
+ if (dma64) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ printk(" highDMA");
+ }
+
+ netdev->vlan_features = netdev->features;
+ printk("\n");
+}
+
+
+static void
+vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
+{
+ u32 tmp;
+
+ tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
+ *(u32 *)mac = tmp;
+
+ tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
+ mac[4] = tmp & 0xff;
+ mac[5] = (tmp >> 8) & 0xff;
+}
+
+
+static void
+vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
+{
+ u32 cfg;
+
+ /* intr settings */
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_CONF_INTR);
+ cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
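+	/* bits 0-1: intr type, bits 2-3: intr mask mode */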
+ adapter->intr.type = cfg & 0x3;
+ adapter->intr.mask_mode = (cfg >> 2) & 0x3;
+
+ if (adapter->intr.type == VMXNET3_IT_AUTO) {
+ int err;
+
+#ifdef CONFIG_PCI_MSI
+ adapter->intr.msix_entries[0].entry = 0;
+ err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
+ VMXNET3_LINUX_MAX_MSIX_VECT);
+ if (!err) {
+ adapter->intr.num_intrs = 1;
+ adapter->intr.type = VMXNET3_IT_MSIX;
+ return;
+ }
+#endif
+
+ err = pci_enable_msi(adapter->pdev);
+ if (!err) {
+ adapter->intr.num_intrs = 1;
+ adapter->intr.type = VMXNET3_IT_MSI;
+ return;
+ }
+ }
+
+ adapter->intr.type = VMXNET3_IT_INTX;
+
+	/* INTx related settings */
+ adapter->intr.num_intrs = 1;
+}
+
+
+static void
+vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
+{
+ if (adapter->intr.type == VMXNET3_IT_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->intr.type == VMXNET3_IT_MSI)
+ pci_disable_msi(adapter->pdev);
+ else
+ BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
+}
+
+
+static void
+vmxnet3_tx_timeout(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ adapter->tx_timeout_count++;
+
+ printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
+ schedule_work(&adapter->work);
+}
+
+
+static void
+vmxnet3_reset_work(struct work_struct *data)
+{
+ struct vmxnet3_adapter *adapter;
+
+ adapter = container_of(data, struct vmxnet3_adapter, work);
+
+ /* if another thread is resetting the device, no need to proceed */
+ if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ return;
+
+ /* if the device is closed, we must leave it alone */
+ if (netif_running(adapter->netdev)) {
+ printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
+ vmxnet3_quiesce_dev(adapter);
+ vmxnet3_reset_dev(adapter);
+ vmxnet3_activate_dev(adapter);
+ } else {
+ printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
+ }
+
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+}
+
+
+static int __devinit
+vmxnet3_probe_device(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct net_device_ops vmxnet3_netdev_ops = {
+ .ndo_open = vmxnet3_open,
+ .ndo_stop = vmxnet3_close,
+ .ndo_start_xmit = vmxnet3_xmit_frame,
+ .ndo_set_mac_address = vmxnet3_set_mac_addr,
+ .ndo_change_mtu = vmxnet3_change_mtu,
+ .ndo_get_stats = vmxnet3_get_stats,
+ .ndo_tx_timeout = vmxnet3_tx_timeout,
+ .ndo_set_multicast_list = vmxnet3_set_mc,
+ .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vmxnet3_netpoll,
+#endif
+ };
+ int err;
+	bool dma64 = false; /* initialized only to quiet gcc */
+ u32 ver;
+ struct net_device *netdev;
+ struct vmxnet3_adapter *adapter;
+ u8 mac[ETH_ALEN];
+
+ netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
+ if (!netdev) {
+ printk(KERN_ERR "Failed to alloc ethernet device for adapter "
+ "%s\n", pci_name(pdev));
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ adapter->shared = pci_alloc_consistent(adapter->pdev,
+ sizeof(struct Vmxnet3_DriverShared),
+ &adapter->shared_pa);
+ if (!adapter->shared) {
+ printk(KERN_ERR "Failed to allocate memory for %s\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto err_alloc_shared;
+ }
+
+ adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
+ sizeof(struct Vmxnet3_TxQueueDesc) +
+ sizeof(struct Vmxnet3_RxQueueDesc),
+ &adapter->queue_desc_pa);
+
+ if (!adapter->tqd_start) {
+ printk(KERN_ERR "Failed to allocate memory for %s\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto err_alloc_queue_desc;
+ }
+ adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
+ + 1);
+
+ adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
+ if (adapter->pm_conf == NULL) {
+ printk(KERN_ERR "Failed to allocate memory for %s\n",
+ pci_name(pdev));
+ err = -ENOMEM;
+ goto err_alloc_pm;
+ }
+
+ err = vmxnet3_alloc_pci_resources(adapter, &dma64);
+ if (err < 0)
+ goto err_alloc_pci;
+
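+	/* check that the device supports revision 1 and select it */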
+ ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
+ if (ver & 1) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
+ } else {
+ printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
+ " %s\n", ver, pci_name(pdev));
+ err = -EBUSY;
+ goto err_ver;
+ }
+
+ ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
+ if (ver & 1) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
+ } else {
+ printk(KERN_ERR "Incompatible upt version (0x%x) for "
+ "adapter %s\n", ver, pci_name(pdev));
+ err = -EBUSY;
+ goto err_ver;
+ }
+
+ vmxnet3_declare_features(adapter, dma64);
+
+ adapter->dev_number = atomic_read(&devices_found);
+ vmxnet3_alloc_intr_resources(adapter);
+
+ vmxnet3_read_mac_addr(adapter, mac);
+ memcpy(netdev->dev_addr, mac, netdev->addr_len);
+
+ netdev->netdev_ops = &vmxnet3_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+ vmxnet3_set_ethtool_ops(netdev);
+
+ INIT_WORK(&adapter->work, vmxnet3_reset_work);
+
+ netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ err = register_netdev(netdev);
+
+ if (err) {
+ printk(KERN_ERR "Failed to register adapter %s\n",
+ pci_name(pdev));
+ goto err_register;
+ }
+
+ set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+ atomic_inc(&devices_found);
+ return 0;
+
+err_register:
+ vmxnet3_free_intr_resources(adapter);
+err_ver:
+ vmxnet3_free_pci_resources(adapter);
+err_alloc_pci:
+ kfree(adapter->pm_conf);
+err_alloc_pm:
+ pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
+ sizeof(struct Vmxnet3_RxQueueDesc),
+ adapter->tqd_start, adapter->queue_desc_pa);
+err_alloc_queue_desc:
+ pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
+err_alloc_shared:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ return err;
+}
+
+
+static void __devexit
+vmxnet3_remove_device(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ flush_scheduled_work();
+
+ unregister_netdev(netdev);
+
+ vmxnet3_free_intr_resources(adapter);
+ vmxnet3_free_pci_resources(adapter);
+ kfree(adapter->pm_conf);
+ pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
+ sizeof(struct Vmxnet3_RxQueueDesc),
+ adapter->tqd_start, adapter->queue_desc_pa);
+ pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
+ free_netdev(netdev);
+}
+
+
+#ifdef CONFIG_PM
+
+static int
+vmxnet3_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_PMConf *pmConf;
+ struct ethhdr *ehdr;
+ struct arphdr *ahdr;
+ u8 *arpreq;
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+ int i = 0;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ vmxnet3_disable_all_intrs(adapter);
+ vmxnet3_free_irqs(adapter);
+ vmxnet3_free_intr_resources(adapter);
+
+ netif_device_detach(netdev);
+ netif_stop_queue(netdev);
+
+ /* Create wake-up filters. */
+ pmConf = adapter->pm_conf;
+ memset(pmConf, 0, sizeof(*pmConf));
+
+ if (adapter->wol & WAKE_UCAST) {
+ pmConf->filters[i].patternSize = ETH_ALEN;
+ pmConf->filters[i].maskSize = 1;
+ memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
+ pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
+
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+ i++;
+ }
+
+ if (adapter->wol & WAKE_ARP) {
+ in_dev = in_dev_get(netdev);
+ if (!in_dev)
+ goto skip_arp;
+
+ ifa = (struct in_ifaddr *)in_dev->ifa_list;
+ if (!ifa)
+ goto skip_arp;
+
+ pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
+ sizeof(struct arphdr) + /* ARP header */
+ 2 * ETH_ALEN + /* 2 Ethernet addresses*/
+ 2 * sizeof(u32); /*2 IPv4 addresses */
+ pmConf->filters[i].maskSize =
+ (pmConf->filters[i].patternSize - 1) / 8 + 1;
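+		/* each mask bit covers one byte of the pattern */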
+
+ /* ETH_P_ARP in Ethernet header. */
+ ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
+ ehdr->h_proto = htons(ETH_P_ARP);
+
+ /* ARPOP_REQUEST in ARP header. */
+ ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
+ ahdr->ar_op = htons(ARPOP_REQUEST);
+ arpreq = (u8 *)(ahdr + 1);
+
+ /* The Unicast IPv4 address in 'tip' field. */
+ arpreq += 2 * ETH_ALEN + sizeof(u32);
+ *(u32 *)arpreq = ifa->ifa_address;
+
+ /* The mask for the relevant bits. */
+ pmConf->filters[i].mask[0] = 0x00;
+ pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
+ pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
+ pmConf->filters[i].mask[3] = 0x00;
+ pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
+ pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
+ in_dev_put(in_dev);
+
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
+ i++;
+ }
+
+skip_arp:
+ if (adapter->wol & WAKE_MAGIC)
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
+
+ pmConf->numFilters = i;
+
+ adapter->shared->devRead.pmConfDesc.confVer = 1;
+ adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
+ adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_PMCFG);
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
+ adapter->wol);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
+
+ return 0;
+}
+
+
+static int
+vmxnet3_resume(struct device *device)
+{
+ int err;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct Vmxnet3_PMConf *pmConf;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ /* Destroy wake-up filters. */
+ pmConf = adapter->pm_conf;
+ memset(pmConf, 0, sizeof(*pmConf));
+
+ adapter->shared->devRead.pmConfDesc.confVer = 1;
+ adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
+ adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
+
+ netif_device_attach(netdev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err != 0)
+ return err;
+
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_PMCFG);
+ vmxnet3_alloc_intr_resources(adapter);
+ vmxnet3_request_irqs(adapter);
+ vmxnet3_enable_all_intrs(adapter);
+
+ return 0;
+}
+
+static struct dev_pm_ops vmxnet3_pm_ops = {
+ .suspend = vmxnet3_suspend,
+ .resume = vmxnet3_resume,
+};
+#endif
+
+static struct pci_driver vmxnet3_driver = {
+ .name = vmxnet3_driver_name,
+ .id_table = vmxnet3_pciid_table,
+ .probe = vmxnet3_probe_device,
+ .remove = __devexit_p(vmxnet3_remove_device),
+#ifdef CONFIG_PM
+ .driver.pm = &vmxnet3_pm_ops,
+#endif
+};
+
+
+static int __init
+vmxnet3_init_module(void)
+{
+ printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
+ VMXNET3_DRIVER_VERSION_REPORT);
+ return pci_register_driver(&vmxnet3_driver);
+}
+
+module_init(vmxnet3_init_module);
+
+
+static void
+vmxnet3_exit_module(void)
+{
+ pci_unregister_driver(&vmxnet3_driver);
+}
+
+module_exit(vmxnet3_exit_module);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
new file mode 100644
index 00000000000..c2c15e4cafc
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -0,0 +1,566 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ *
+ */
+
+
+#include "vmxnet3_int.h"
+
+struct vmxnet3_stat_desc {
+ char desc[ETH_GSTRING_LEN];
+ int offset;
+};
+
+
+static u32
+vmxnet3_get_rx_csum(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ return adapter->rxcsum;
+}
+
+
+static int
+vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->rxcsum != val) {
+ adapter->rxcsum = val;
+ if (netif_running(netdev)) {
+ if (val)
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ }
+ }
+ return 0;
+}
+
+
+/* per tq stats maintained by the device */
+static const struct vmxnet3_stat_desc
+vmxnet3_tq_dev_stats[] = {
+ /* description, offset */
+ { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+ { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+ { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+ { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+ { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+ { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+ { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+ { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+ { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
+ { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+};
+
+/* per tq stats maintained by the driver */
+static const struct vmxnet3_stat_desc
+vmxnet3_tq_driver_stats[] = {
+ /* description, offset */
+ {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_total) },
+ { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_too_many_frags) },
+ { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_oversized_hdr) },
+ { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_hdr_inspect_err) },
+ { " tso", offsetof(struct vmxnet3_tq_driver_stats,
+ drop_tso) },
+ { "ring full", offsetof(struct vmxnet3_tq_driver_stats,
+ tx_ring_full) },
+ { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
+ linearized) },
+ { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
+ copy_skb_header) },
+ { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+ oversized_hdr) },
+};
+
+/* per rq stats maintained by the device */
+static const struct vmxnet3_stat_desc
+vmxnet3_rq_dev_stats[] = {
+ { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+ { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+ { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+ { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+ { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+ { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+ { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+ { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+ { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+ { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
+};
+
+/* per rq stats maintained by the driver */
+static const struct vmxnet3_stat_desc
+vmxnet3_rq_driver_stats[] = {
+ /* description, offset */
+ { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_total) },
+ { " err", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_err) },
+ { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
+ drop_fcs) },
+ { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+ rx_buf_alloc_failure) },
+};
+
+/* global stats maintained by the driver */
+static const struct vmxnet3_stat_desc
+vmxnet3_global_stats[] = {
+ /* description, offset */
+ { "tx timeout count", offsetof(struct vmxnet3_adapter,
+ tx_timeout_count) }
+};
+
+
+struct net_device_stats *
+vmxnet3_get_stats(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter;
+ struct vmxnet3_tq_driver_stats *drvTxStats;
+ struct vmxnet3_rq_driver_stats *drvRxStats;
+ struct UPT1_TxStats *devTxStats;
+ struct UPT1_RxStats *devRxStats;
+ struct net_device_stats *net_stats = &netdev->stats;
+
+ adapter = netdev_priv(netdev);
+
+ /* Collect the dev stats into the shared area */
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ /* Assuming that we have a single queue device */
+ devTxStats = &adapter->tqd_start->stats;
+ devRxStats = &adapter->rqd_start->stats;
+
+ /* Get access to the driver stats per queue */
+ drvTxStats = &adapter->tx_queue.stats;
+ drvRxStats = &adapter->rx_queue.stats;
+
+ memset(net_stats, 0, sizeof(*net_stats));
+
+ net_stats->rx_packets = devRxStats->ucastPktsRxOK +
+ devRxStats->mcastPktsRxOK +
+ devRxStats->bcastPktsRxOK;
+
+ net_stats->tx_packets = devTxStats->ucastPktsTxOK +
+ devTxStats->mcastPktsTxOK +
+ devTxStats->bcastPktsTxOK;
+
+ net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
+ devRxStats->mcastBytesRxOK +
+ devRxStats->bcastBytesRxOK;
+
+ net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
+ devTxStats->mcastBytesTxOK +
+ devTxStats->bcastBytesTxOK;
+
+ net_stats->rx_errors = devRxStats->pktsRxError;
+ net_stats->tx_errors = devTxStats->pktsTxError;
+ net_stats->rx_dropped = drvRxStats->drop_total;
+ net_stats->tx_dropped = drvTxStats->drop_total;
+ net_stats->multicast = devRxStats->mcastPktsRxOK;
+
+ return net_stats;
+}
+
+static int
+vmxnet3_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_tq_driver_stats) +
+ ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+ ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+ ARRAY_SIZE(vmxnet3_global_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+static int
+vmxnet3_get_regs_len(struct net_device *netdev)
+{
+ return 20 * sizeof(u32);
+}
+
+
+static void
+vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
+ drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+ strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
+ sizeof(drvinfo->version));
+	drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ ETHTOOL_BUSINFO_LEN);
+ drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
+ drvinfo->testinfo_len = 0;
+ drvinfo->eedump_len = 0;
+ drvinfo->regdump_len = vmxnet3_get_regs_len(netdev);
+}
+
+
+static void
+vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
+{
+ if (stringset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
+ memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+ memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
+ memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
+ memcpy(buf, vmxnet3_global_stats[i].desc,
+ ETH_GSTRING_LEN);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static u32
+vmxnet3_get_flags(struct net_device *netdev) {
+ return netdev->features;
+}
+
+static int
+vmxnet3_set_flags(struct net_device *netdev, u32 data) {
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
+ u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+
+ if (lro_requested ^ lro_present) {
+		/* toggle the LRO feature */
+		netdev->features ^= NETIF_F_LRO;
+
+		/* update hardware LRO capability accordingly */
+		if (lro_requested)
+			adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_LRO;
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_UPDATE_FEATURE);
+ }
+ return 0;
+}
+
+static void
+vmxnet3_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u8 *base;
+ int i;
+
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+
+ /* this does assume each counter is 64-bit wide */
+
+ base = (u8 *)&adapter->tqd_start->stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+ *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->tx_queue.stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+ *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
+
+ base = (u8 *)&adapter->rqd_start->stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+ *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
+
+ base = (u8 *)&adapter->rx_queue.stats;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+ *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+
+ base = (u8 *)adapter;
+ for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
+ *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
+}
+
+
+static void
+vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u32 *buf = p;
+
+ memset(p, 0, vmxnet3_get_regs_len(netdev));
+
+ regs->version = 1;
+
+ /* Update vmxnet3_get_regs_len if we want to dump more registers */
+
+	/* each ring's state takes a multiple of 16 bytes (4 u32s) here */
+ buf[0] = adapter->tx_queue.tx_ring.next2fill;
+ buf[1] = adapter->tx_queue.tx_ring.next2comp;
+ buf[2] = adapter->tx_queue.tx_ring.gen;
+ buf[3] = 0;
+
+ buf[4] = adapter->tx_queue.comp_ring.next2proc;
+ buf[5] = adapter->tx_queue.comp_ring.gen;
+ buf[6] = adapter->tx_queue.stopped;
+ buf[7] = 0;
+
+ buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
+ buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
+ buf[10] = adapter->rx_queue.rx_ring[0].gen;
+ buf[11] = 0;
+
+ buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
+ buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
+ buf[14] = adapter->rx_queue.rx_ring[1].gen;
+ buf[15] = 0;
+
+ buf[16] = adapter->rx_queue.comp_ring.next2proc;
+ buf[17] = adapter->rx_queue.comp_ring.gen;
+ buf[18] = 0;
+ buf[19] = 0;
+}
+
+
+static void
+vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
+ wol->wolopts = adapter->wol;
+}
+
+
+static int
+vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
+ WAKE_MAGICSECURE)) {
+ return -EOPNOTSUPP;
+ }
+
+ adapter->wol = wol->wolopts;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+ return 0;
+}
+
+
+static int
+vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP;
+ ecmd->advertising = ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (adapter->link_speed) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+ return 0;
+}
+
+
+static void
+vmxnet3_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *param)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+
+ param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
+ param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
+ param->rx_mini_max_pending = 0;
+ param->rx_jumbo_max_pending = 0;
+
+ param->rx_pending = adapter->rx_queue.rx_ring[0].size;
+ param->tx_pending = adapter->tx_queue.tx_ring.size;
+ param->rx_mini_pending = 0;
+ param->rx_jumbo_pending = 0;
+}
+
+
+static int
+vmxnet3_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *param)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ u32 new_tx_ring_size, new_rx_ring_size;
+ u32 sz;
+ int err = 0;
+
+ if (param->tx_pending == 0 || param->tx_pending >
+ VMXNET3_TX_RING_MAX_SIZE)
+ return -EINVAL;
+
+ if (param->rx_pending == 0 || param->rx_pending >
+ VMXNET3_RX_RING_MAX_SIZE)
+ return -EINVAL;
+
+
+ /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
+ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
+ ~VMXNET3_RING_SIZE_MASK;
+ new_tx_ring_size = min_t(u32, new_tx_ring_size,
+ VMXNET3_TX_RING_MAX_SIZE);
+ if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
+ VMXNET3_RING_SIZE_ALIGN) != 0)
+ return -EINVAL;
+
+ /* ring0 has to be a multiple of
+ * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
+ */
+ sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
+ new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
+ new_rx_ring_size = min_t(u32, new_rx_ring_size,
+ VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+ if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
+ sz) != 0)
+ return -EINVAL;
+
+ if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
+ new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
+ return 0;
+ }
+
+ /*
+ * Reset_work may be in the middle of resetting the device, wait for its
+ * completion.
+ */
+ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (netif_running(netdev)) {
+ vmxnet3_quiesce_dev(adapter);
+ vmxnet3_reset_dev(adapter);
+
+ /* recreate the rx queue and the tx queue based on the
+ * new sizes */
+ vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+ vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+
+ err = vmxnet3_create_queues(adapter, new_tx_ring_size,
+ new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
+ if (err) {
+ /* failed, most likely because of OOM, try default
+ * size */
+			printk(KERN_ERR "%s: failed to apply new sizes, "
+				"trying the default ones\n", netdev->name);
+ err = vmxnet3_create_queues(adapter,
+ VMXNET3_DEF_TX_RING_SIZE,
+ VMXNET3_DEF_RX_RING_SIZE,
+ VMXNET3_DEF_RX_RING_SIZE);
+ if (err) {
+ printk(KERN_ERR "%s: failed to create queues "
+ "with default sizes. Closing it\n",
+ netdev->name);
+ goto out;
+ }
+ }
+
+ err = vmxnet3_activate_dev(adapter);
+ if (err)
+ printk(KERN_ERR "%s: failed to re-activate, error %d."
+ " Closing it\n", netdev->name, err);
+ }
+
+out:
+ clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
+ if (err)
+ vmxnet3_force_close(adapter);
+
+ return err;
+}
+
+
+static struct ethtool_ops vmxnet3_ethtool_ops = {
+ .get_settings = vmxnet3_get_settings,
+ .get_drvinfo = vmxnet3_get_drvinfo,
+ .get_regs_len = vmxnet3_get_regs_len,
+ .get_regs = vmxnet3_get_regs,
+ .get_wol = vmxnet3_get_wol,
+ .set_wol = vmxnet3_set_wol,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = vmxnet3_get_rx_csum,
+ .set_rx_csum = vmxnet3_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+ .get_strings = vmxnet3_get_strings,
+ .get_flags = vmxnet3_get_flags,
+ .set_flags = vmxnet3_set_flags,
+ .get_sset_count = vmxnet3_get_sset_count,
+ .get_ethtool_stats = vmxnet3_get_ethtool_stats,
+ .get_ringparam = vmxnet3_get_ringparam,
+ .set_ringparam = vmxnet3_set_ringparam,
+};
+
+void vmxnet3_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops);
+}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
new file mode 100644
index 00000000000..6bb91576e99
--- /dev/null
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -0,0 +1,389 @@
+/*
+ * Linux driver for VMware's vmxnet3 ethernet NIC.
+ *
+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
+ *
+ */
+
+#ifndef _VMXNET3_INT_H
+#define _VMXNET3_INT_H
+
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/ioport.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include <asm/dma.h>
+#include <asm/page.h>
+
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <asm/checksum.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/inetdevice.h>
+#include <linux/dst.h>
+
+#include "vmxnet3_defs.h"
+
+#ifdef DEBUG
+# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
+#else
+# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
+#endif
+
+
+/*
+ * Version numbers
+ */
+#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k"
+
+/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
+#define VMXNET3_DRIVER_VERSION_NUM 0x01000500
+
+
+/*
+ * Capabilities
+ */
+
+enum {
+ VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */
+ VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over
+ * IPv4 */
+ VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */
+ VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */
+ VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */
+ VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation
+ * offload */
+ VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */
+ VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */
+ VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */
+ VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */
+ VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */
+ VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
+ VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */
+ VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */
+ VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries
+ * for a pkt */
+ VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */
+ VMNET_CAP_LPD = 0x10000, /* large pkt delivery */
+ VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW*/
+ VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather transmits can */
+ /* span multiple pages */
+ VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */
+ VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */
+ VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */
+ /* pkts up to 256kB. */
+ VMNET_CAP_UPT = 0x400000 /* Support UPT */
+};
+
+/*
+ * PCI vendor and device IDs.
+ */
+#define PCI_VENDOR_ID_VMWARE 0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0
+#define MAX_ETHERNET_CARDS 10
+#define MAX_PCI_PASSTHRU_DEVICE 6
+
+struct vmxnet3_cmd_ring {
+ union Vmxnet3_GenericDesc *base;
+ u32 size;
+ u32 next2fill;
+ u32 next2comp;
+ u8 gen;
+ dma_addr_t basePA;
+};
+
+static inline void
+vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
+{
+ ring->next2fill++;
+ if (unlikely(ring->next2fill == ring->size)) {
+ ring->next2fill = 0;
+ VMXNET3_FLIP_RING_GEN(ring->gen);
+ }
+}
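
The generation bit is toggled each time next2fill wraps around, which is how the device tells descriptors written on the current pass apart from stale ones left over from the previous pass. A small stand-alone sketch of two passes over a 4-entry ring, assuming the generation starts at 1 and VMXNET3_FLIP_RING_GEN is a simple toggle (both assumptions, since those macros come from vmxnet3_defs.h):

#include <stdio.h>

int main(void)
{
        unsigned int size = 4, next2fill = 0;
        unsigned int gen = 1;   /* assumed initial generation */
        unsigned int i;

        for (i = 0; i < 2 * size; i++) {
                printf("slot %u written with gen %u\n", next2fill, gen);
                if (++next2fill == size) {
                        next2fill = 0;
                        gen ^= 1;   /* toggle, like VMXNET3_FLIP_RING_GEN */
                }
        }
        return 0;
}
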
+
+static inline void
+vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
+{
+ VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
+}
+
+static inline int
+vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
+{
+ return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
+ ring->next2comp - ring->next2fill - 1;
+}
+
+struct vmxnet3_comp_ring {
+ union Vmxnet3_GenericDesc *base;
+ u32 size;
+ u32 next2proc;
+ u8 gen;
+ u8 intr_idx;
+ dma_addr_t basePA;
+};
+
+static inline void
+vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
+{
+ ring->next2proc++;
+ if (unlikely(ring->next2proc == ring->size)) {
+ ring->next2proc = 0;
+ VMXNET3_FLIP_RING_GEN(ring->gen);
+ }
+}
+
+struct vmxnet3_tx_data_ring {
+ struct Vmxnet3_TxDataDesc *base;
+ u32 size;
+ dma_addr_t basePA;
+};
+
+enum vmxnet3_buf_map_type {
+ VMXNET3_MAP_INVALID = 0,
+ VMXNET3_MAP_NONE,
+ VMXNET3_MAP_SINGLE,
+ VMXNET3_MAP_PAGE,
+};
+
+struct vmxnet3_tx_buf_info {
+ u32 map_type;
+ u16 len;
+ u16 sop_idx;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+struct vmxnet3_tq_driver_stats {
+ u64 drop_total; /* # of pkts dropped by the driver; the
+ * counters below track drops due to
+ * different reasons
+ */
+ u64 drop_too_many_frags;
+ u64 drop_oversized_hdr;
+ u64 drop_hdr_inspect_err;
+ u64 drop_tso;
+
+ u64 tx_ring_full;
+ u64 linearized; /* # of pkts linearized */
+ u64 copy_skb_header; /* # of times we have to copy skb header */
+ u64 oversized_hdr;
+};
+
+struct vmxnet3_tx_ctx {
+ bool ipv4;
+ u16 mss;
+ u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum
+ * offloading
+ */
+ u32 l4_hdr_size; /* only valid if mss != 0 */
+ u32 copy_size; /* # of bytes copied into the data ring */
+ union Vmxnet3_GenericDesc *sop_txd;
+ union Vmxnet3_GenericDesc *eop_txd;
+};
+
+struct vmxnet3_tx_queue {
+ spinlock_t tx_lock;
+ struct vmxnet3_cmd_ring tx_ring;
+ struct vmxnet3_tx_buf_info *buf_info;
+ struct vmxnet3_tx_data_ring data_ring;
+ struct vmxnet3_comp_ring comp_ring;
+ struct Vmxnet3_TxQueueCtrl *shared;
+ struct vmxnet3_tq_driver_stats stats;
+ bool stopped;
+ int num_stop; /* # of times the queue is
+ * stopped */
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+enum vmxnet3_rx_buf_type {
+ VMXNET3_RX_BUF_NONE = 0,
+ VMXNET3_RX_BUF_SKB = 1,
+ VMXNET3_RX_BUF_PAGE = 2
+};
+
+struct vmxnet3_rx_buf_info {
+ enum vmxnet3_rx_buf_type buf_type;
+ u16 len;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ };
+ dma_addr_t dma_addr;
+};
+
+struct vmxnet3_rx_ctx {
+ struct sk_buff *skb;
+ u32 sop_idx;
+};
+
+struct vmxnet3_rq_driver_stats {
+ u64 drop_total;
+ u64 drop_err;
+ u64 drop_fcs;
+ u64 rx_buf_alloc_failure;
+};
+
+struct vmxnet3_rx_queue {
+ struct vmxnet3_cmd_ring rx_ring[2];
+ struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_ctx rx_ctx;
+ u32 qid; /* rqID in RCD for buffer from 1st ring */
+ u32 qid2; /* rqID in RCD for buffer from 2nd ring */
+ u32 uncommitted[2]; /* # of buffers allocated since last RXPROD
+ * update */
+ struct vmxnet3_rx_buf_info *buf_info[2];
+ struct Vmxnet3_RxQueueCtrl *shared;
+ struct vmxnet3_rq_driver_stats stats;
+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+#define VMXNET3_LINUX_MAX_MSIX_VECT 1
+
+struct vmxnet3_intr {
+ enum vmxnet3_intr_mask_mode mask_mode;
+ enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */
+ u8 num_intrs; /* # of intr vectors */
+ u8 event_intr_idx; /* idx of the intr vector for event */
+ u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
+#ifdef CONFIG_PCI_MSI
+ struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
+#endif
+};
+
+#define VMXNET3_STATE_BIT_RESETTING 0
+#define VMXNET3_STATE_BIT_QUIESCED 1
+struct vmxnet3_adapter {
+ struct vmxnet3_tx_queue tx_queue;
+ struct vmxnet3_rx_queue rx_queue;
+ struct napi_struct napi;
+ struct vlan_group *vlan_grp;
+
+ struct vmxnet3_intr intr;
+
+ struct Vmxnet3_DriverShared *shared;
+ struct Vmxnet3_PMConf *pm_conf;
+ struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */
+ struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ u8 *hw_addr0; /* for BAR 0 */
+ u8 *hw_addr1; /* for BAR 1 */
+
+ /* feature control */
+ bool rxcsum;
+ bool lro;
+ bool jumbo_frame;
+
+ /* rx buffer related */
+ unsigned skb_buf_size;
+ int rx_buf_per_pkt; /* only applies to the 1st ring */
+ dma_addr_t shared_pa;
+ dma_addr_t queue_desc_pa;
+
+ /* Wake-on-LAN */
+ u32 wol;
+
+ /* Link speed */
+ u32 link_speed; /* in mbps */
+
+ u64 tx_timeout_count;
+ struct work_struct work;
+
+ unsigned long state; /* VMXNET3_STATE_BIT_xxx */
+
+ int dev_number;
+};
+
+#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
+ writel((val), (adapter)->hw_addr0 + (reg))
+#define VMXNET3_READ_BAR0_REG(adapter, reg) \
+ readl((adapter)->hw_addr0 + (reg))
+
+#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
+ writel((val), (adapter)->hw_addr1 + (reg))
+#define VMXNET3_READ_BAR1_REG(adapter, reg) \
+ readl((adapter)->hw_addr1 + (reg))
+
+#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
+#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
+ ((rq)->rx_ring[ring_idx].size >> 3)
+
+#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma))
+#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32))
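
VMXNET3_GET_ADDR_LO/HI split a 64-bit DMA address into the two 32-bit halves that are programmed into the device registers. A trivial sketch of the same split:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dma = 0x123456789abcULL;
        uint32_t lo = (uint32_t)dma;            /* 0x56789abc */
        uint32_t hi = (uint32_t)(dma >> 32);    /* 0x00001234 */

        printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", lo, hi);
        return 0;
}
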
+
+/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
+#define VMXNET3_DEF_TX_RING_SIZE 512
+#define VMXNET3_DEF_RX_RING_SIZE 256
+
+#define VMXNET3_MAX_ETH_HDR_SIZE 22
+#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
+
+int
+vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
+
+int
+vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_force_close(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
+ struct vmxnet3_adapter *adapter);
+
+void
+vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ struct vmxnet3_adapter *adapter);
+
+int
+vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
+ u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
+
+extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev);
+
+extern char vmxnet3_driver_name[];
+#endif
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 9693b0fd323..0bd898c9475 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 66360a2a14c..e2c33c06190 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -76,6 +76,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 2573c18b6aa..cd8cb95c5bd 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -84,6 +84,7 @@
#include <linux/kernel.h> /* printk(), and other useful stuff */
#include <linux/module.h>
#include <linux/string.h> /* inline memset(), etc. */
+#include <linux/sched.h>
#include <linux/slab.h> /* kmalloc(), kfree() */
#include <linux/stddef.h> /* offsetof(), etc. */
#include <linux/wanrouter.h> /* WAN router definitions */
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 81c8aec9df9..07d00b4cf48 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -81,6 +81,7 @@
*/
#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3e90eb81618..beda387f2fc 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/pci.h>
+#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/if.h>
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index cf5fd17ad70..f1bff98acd1 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -58,8 +58,7 @@ struct cisco_state {
spinlock_t lock;
unsigned long last_poll;
int up;
- int request_sent;
- u32 txseq; /* TX sequence number */
+ u32 txseq; /* TX sequence number, 0 = none */
u32 rxseq; /* RX sequence number */
};
@@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb)
struct cisco_packet *cisco_data;
struct in_device *in_dev;
__be32 addr, mask;
+ u32 ack;
if (skb->len < sizeof(struct hdlc_header))
goto rx_error;
@@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb)
case CISCO_KEEPALIVE_REQ:
spin_lock(&st->lock);
st->rxseq = ntohl(cisco_data->par1);
- if (st->request_sent &&
- ntohl(cisco_data->par2) == st->txseq) {
+ ack = ntohl(cisco_data->par2);
+ if (ack && (ack == st->txseq ||
+ /* our current REQ may be in transit */
+ ack == st->txseq - 1)) {
st->last_poll = jiffies;
if (!st->up) {
u32 sec, min, hrs, days;
@@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg)
cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
htonl(st->rxseq));
- st->request_sent = 1;
spin_unlock(&st->lock);
st->timer.expires = jiffies + st->settings.interval * HZ;
@@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev)
unsigned long flags;
spin_lock_irqsave(&st->lock, flags);
- st->up = 0;
- st->request_sent = 0;
- st->txseq = st->rxseq = 0;
+ st->up = st->txseq = st->rxseq = 0;
spin_unlock_irqrestore(&st->lock, flags);
init_timer(&st->timer);
@@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev)
spin_lock_irqsave(&st->lock, flags);
netif_dormant_on(dev);
- st->up = 0;
- st->request_sent = 0;
+ st->up = st->txseq = 0;
spin_unlock_irqrestore(&st->lock, flags);
}
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 83da596e205..58c66819f39 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index a52f29c72c3..f1340faaf02 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 49ea9c92b7e..d7a764a2fc1 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -31,13 +31,12 @@ config STRIP
---help---
Say Y if you have a Metricom radio and intend to use Starmode Radio
IP. STRIP is a radio protocol developed for the MosquitoNet project
- (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet
- traffic using Metricom radios. Metricom radios are small, battery
- powered, 100kbit/sec packet radio transceivers, about the size and
- weight of a cellular telephone. (You may also have heard them called
- "Metricom modems" but we avoid the term "modem" because it misleads
- many people into thinking that you can plug a Metricom modem into a
- phone line and use it as a modem.)
+ to send Internet traffic using Metricom radios. Metricom radios are
+ small, battery powered, 100kbit/sec packet radio transceivers, about
+ the size and weight of a cellular telephone. (You may also have heard
+ them called "Metricom modems" but we avoid the term "modem" because
+ it misleads many people into thinking that you can plug a Metricom
+ modem into a phone line and use it as a modem.)
You can use STRIP on any Linux machine with a serial port, although
it is obviously most useful for people with laptop computers. If you
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index 4f6ab132218..b07e4d3a6b4 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -266,7 +266,7 @@ do { \
#define ADM8211_SYNCTL_CS1 (1 << 28)
#define ADM8211_SYNCTL_CAL (1 << 27)
#define ADM8211_SYNCTL_SELCAL (1 << 26)
-#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22))
+#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22))
#define ADM8211_SYNCTL_RFMD (1 << 22)
#define ADM8211_SYNCTL_GENERAL (0x7 << 22)
/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c
index 2ab1d59870f..a8b689635a3 100644
--- a/drivers/net/wireless/arlan-proc.c
+++ b/drivers/net/wireless/arlan-proc.c
@@ -402,7 +402,7 @@ static int arlan_setup_card_by_book(struct net_device *dev)
static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
-static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_info(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -629,7 +629,7 @@ final:
*lenp = pos;
if (!write)
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
else
{
*lenp = 0;
@@ -639,7 +639,7 @@ final:
}
-static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_info161719(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -669,11 +669,11 @@ static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
-static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_infotxRing(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -698,11 +698,11 @@ static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp
SARLBNpln(u_char, txBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
-static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_inforxRing(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -726,11 +726,11 @@ static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp
SARLBNpln(u_char, rxBuffer, 0x800);
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
-static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_info18(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int i;
@@ -756,7 +756,7 @@ static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
final:
*lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ retv = proc_dostring(ctl, write, buffer, lenp, ppos);
return retv;
}
@@ -766,7 +766,7 @@ final:
static char conf_reset_result[200];
-static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
+static int arlan_configure(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int pos = 0;
@@ -788,10 +788,10 @@ static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
return -1;
*lenp = pos;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, buffer, lenp, ppos);
}
-static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
+static int arlan_sysctl_reset(ctl_table * ctl, int write,
void __user *buffer, size_t * lenp, loff_t *ppos)
{
int pos = 0;
@@ -811,7 +811,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
} else
return -1;
*lenp = pos + 3;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+ return proc_dostring(ctl, write, buffer, lenp, ppos);
}
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index b3e5cf3735b..dbd488da18b 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1141,7 +1141,8 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
int chain, idx, i;
- u8 f;
+ u32 phy_data = 0;
+ u8 f, tmp;
switch (channel->band) {
case IEEE80211_BAND_2GHZ:
@@ -1208,9 +1209,6 @@ static int ar9170_set_freq_cal_data(struct ar9170 *ar,
}
for (i = 0; i < 76; i++) {
- u32 phy_data;
- u8 tmp;
-
if (i < 25) {
tmp = ar9170_interpolate_val(i, &pwrs[0][0],
&vpds[0][0]);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0138ac8bf5..e974e5829e1 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -64,6 +64,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9170) },
/* Atheros TG121N */
{ USB_DEVICE(0x0cf3, 0x1001) },
+ /* TP-Link TL-WN821N v2 */
+ { USB_DEVICE(0x0cf3, 0x1002) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160A */
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 3234995e888..0ad6d0b76e9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -609,14 +609,24 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
AR_PHY_CH1_EXT_CCA,
AR_PHY_CH2_EXT_CCA
};
- u8 chainmask;
+ u8 chainmask, rx_chain_status;
+ rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
if (AR_SREV_9285(ah))
chainmask = 0x9;
- else if (AR_SREV_9280(ah) || AR_SREV_9287(ah))
- chainmask = 0x1B;
- else
- chainmask = 0x3F;
+ else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
+ if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ } else {
+ if (rx_chain_status & 0x4)
+ chainmask = 0x3F;
+ else if (rx_chain_status & 0x2)
+ chainmask = 0x1B;
+ else
+ chainmask = 0x09;
+ }
h = ah->nfCalHist;
@@ -697,6 +707,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
else if (AR_SREV_9285(ah))
noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
+ else if (AR_SREV_9287(ah))
+ noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
else
noise_floor = AR_PHY_CCA_MAX_AR5416_GOOD_VALUE;
@@ -924,6 +936,7 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
regVal |= (1 << (19 + i));
REG_WRITE(ah, 0x7834, regVal);
udelay(1);
+ regVal = REG_READ(ah, 0x7834);
regVal &= (~(0x1 << (19 + i)));
reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
regVal |= (reg_field << (19 + i));
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 019bcbba40e..9028ab193e4 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -28,6 +28,7 @@ extern const struct ath9k_percal_data adc_init_dc_cal;
#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
+#define AR_PHY_CCA_MAX_AR9287_GOOD_VALUE -118
#define AR_PHY_CCA_MAX_HIGH_VALUE -62
#define AR_PHY_CCA_MIN_BAD_VALUE -140
#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index ae7fb5dcb26..4071fc91da0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -509,6 +509,8 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
eep->baseEepHeader.dacLpMode);
+ udelay(100);
+
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL, AR_PHY_FRAME_CTL_TX_CLIP,
pModal->miscBits >> 2);
@@ -902,7 +904,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
u16 powerLimit)
{
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
-#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */
+#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index b6c6cca0781..ca7694caf36 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -842,7 +842,7 @@ static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
{
- if (AR_SREV_9287_11(ah))
+ if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9287Modes_rx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
@@ -853,7 +853,7 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
else if (AR_SREV_9280_20(ah))
ath9k_hw_init_rxgain_ini(ah);
- if (AR_SREV_9287_11(ah)) {
+ if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9287Modes_tx_gain_9287_1_1,
ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
@@ -965,7 +965,7 @@ int ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_mode_regs(ah);
if (ah->is_pciexpress)
- ath9k_hw_configpcipowersave(ah, 0);
+ ath9k_hw_configpcipowersave(ah, 0, 0);
else
ath9k_hw_disablepcie(ah);
@@ -1273,6 +1273,15 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
*/
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
+ if (AR_SREV_9280_10_OR_LATER(ah)) {
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) &
+ (~AR_PCU_MISC_MODE2_HWWAR1);
+
+ if (AR_SREV_9287_10_OR_LATER(ah))
+ val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
+
+ REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
+ }
if (!AR_SREV_5416_20_OR_LATER(ah) ||
AR_SREV_9280_10_OR_LATER(ah))
@@ -1784,7 +1793,7 @@ static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- if (OLC_FOR_AR9280_20_LATER) {
+ if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) {
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON))
return false;
} else if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
@@ -2338,6 +2347,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
u32 macStaId1;
+ u64 tsf = 0;
int i, rx_chainmask, r;
ah->extprotspacing = sc->ht_extprotspacing;
@@ -2347,7 +2357,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
- if (curchan)
+ if (curchan && !ah->chip_fullsleep)
ath9k_hw_getnf(ah, curchan);
if (bChannelChange &&
@@ -2356,8 +2366,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
(chan->channel != ah->curchan->channel) &&
((chan->channelFlags & CHANNEL_ALL) ==
(ah->curchan->channelFlags & CHANNEL_ALL)) &&
- (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
- !IS_CHAN_A_5MHZ_SPACED(ah->curchan)))) {
+ !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
+ IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) {
ath9k_hw_loadnf(ah, ah->curchan);
@@ -2372,6 +2382,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
+ /* For chips on which RTC reset is done, save TSF before it gets cleared */
+ if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ tsf = ath9k_hw_gettsf64(ah);
+
saveLedState = REG_READ(ah, AR_CFG_LED) &
(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
@@ -2398,6 +2412,10 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
udelay(50);
}
+ /* Restore TSF */
+ if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+ ath9k_hw_settsf64(ah, tsf);
+
if (AR_SREV_9280_10_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
@@ -3005,9 +3023,10 @@ void ath9k_ps_restore(struct ath_softc *sc)
* Programming the SerDes must go through the same 288 bit serial shift
* register as the other analog registers. Hence the 9 writes.
*/
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore)
+void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
{
u8 i;
+ u32 val;
if (ah->is_pciexpress != true)
return;
@@ -3017,84 +3036,113 @@ void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore)
return;
/* Nothing to do on restore for 11N */
- if (restore)
- return;
+ if (!restore) {
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /*
+ * AR9280 2.0 or later chips use SerDes values from the
+ * initvals.h initialized depending on chipset during
+ * ath9k_hw_init()
+ */
+ for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
+ INI_RA(&ah->iniPcieSerdes, i, 1));
+ }
+ } else if (AR_SREV_9280(ah) &&
+ (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+
+ /* Shut off CLKREQ active in L1 */
+ if (ah->config.pcie_clock_req)
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
+ else
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /*
- * AR9280 2.0 or later chips use SerDes values from the
- * initvals.h initialized depending on chipset during
- * ath9k_hw_init()
- */
- for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
- REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
- INI_RA(&ah->iniPcieSerdes, i, 1));
- }
- } else if (AR_SREV_9280(ah) &&
- (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- /* Shut off CLKREQ active in L1 */
- if (ah->config.pcie_clock_req)
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
- else
- REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
-
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
+ } else {
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+ /* RX shut off when elecidle is asserted */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
- } else {
- REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
+ /*
+ * Ignore ah->ah_config.pcie_clock_req setting for
+ * pre-AR9280 11n
+ */
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
- /* RX shut off when elecidle is asserted */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
+ REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
- /*
- * Ignore ah->ah_config.pcie_clock_req setting for
- * pre-AR9280 11n
- */
- REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
+ /* Load the new settings */
+ REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
+ }
- REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
- REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
- REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
+ udelay(1000);
- /* Load the new settings */
- REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
- }
+ /* set bit 19 to allow forcing of pcie core into L1 state */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
- udelay(1000);
+ /* Several PCIe massages to ensure proper behaviour */
+ if (ah->config.pcie_waen) {
+ val = ah->config.pcie_waen;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) {
+ val = AR9285_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
+ /*
+ * On AR9280 chips bit 22 of 0x4004 needs to be
+ * set otherwise card may disappear.
+ */
+ val = AR9280_WA_DEFAULT;
+ if (!power_off)
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else
+ val = AR_WA_DEFAULT;
+ }
- /* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_WRITE(ah, AR_WA, val);
+ }
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen) {
- REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
- } else {
- if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah))
- REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT);
+ if (power_off) {
/*
- * On AR9280 chips bit 22 of 0x4004 needs to be set to
- * otherwise card may disappear.
+ * Set PCIe workaround bits
+ * bit 14 in WA register (disable L1) should only
+ * be set when device enters D3 and be cleared
+ * when device comes back to D0.
*/
- else if (AR_SREV_9280(ah))
- REG_WRITE(ah, AR_WA, AR9280_WA_DEFAULT);
- else
- REG_WRITE(ah, AR_WA, AR_WA_DEFAULT);
+ if (ah->config.pcie_waen) {
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ } else {
+ if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
+ AR_SREV_9287(ah)) &&
+ (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
+ (AR_SREV_9280(ah) &&
+ (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
+ }
+ }
}
}
@@ -3652,15 +3700,7 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
#endif
- if ((ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9160) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9100) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9280) ||
- (ah->hw_version.macVersion == AR_SREV_VERSION_9285))
- pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
- else
- pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
+ pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 9106a0b537d..b8923457182 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -106,7 +106,7 @@
#define AH_TSF_WRITE_TIMEOUT 100 /* (us) */
#define AH_TIME_QUANTUM 10
#define AR_KEYTABLE_SIZE 128
-#define POWER_UP_TIME 200000
+#define POWER_UP_TIME 10000
#define SPUR_RSSI_THRESH 40
#define CAB_TIMEOUT_VAL 10
@@ -650,7 +650,7 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs);
bool ath9k_hw_setpower(struct ath_hw *ah,
enum ath9k_power_mode mode);
-void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore);
+void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
/* Interrupt Handling */
bool ath9k_hw_intrpend(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3dc7b5a13e6..52bed89063d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1131,7 +1131,7 @@ void ath_radio_enable(struct ath_softc *sc)
int r;
ath9k_ps_wakeup(sc);
- ath9k_hw_configpcipowersave(ah, 0);
+ ath9k_hw_configpcipowersave(ah, 0, 0);
if (!ah->curchan)
ah->curchan = ath_get_curchannel(sc, sc->hw);
@@ -1202,7 +1202,7 @@ void ath_radio_disable(struct ath_softc *sc)
spin_unlock_bh(&sc->sc_resetlock);
ath9k_hw_phy_disable(ah);
- ath9k_hw_configpcipowersave(ah, 1);
+ ath9k_hw_configpcipowersave(ah, 1, 1);
ath9k_ps_restore(sc);
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
@@ -1226,11 +1226,6 @@ static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
bool blocked = !!ath_is_rfkill_set(sc);
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-
- if (blocked)
- ath_radio_disable(sc);
- else
- ath_radio_enable(sc);
}
static void ath_start_rfkill_poll(struct ath_softc *sc)
@@ -1260,6 +1255,7 @@ void ath_detach(struct ath_softc *sc)
DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
ath_deinit_leds(sc);
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
for (i = 0; i < sc->num_sec_wiphy; i++) {
struct ath_wiphy *aphy = sc->sec_wiphy[i];
@@ -1942,7 +1938,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
init_channel = ath_get_curchannel(sc, hw);
/* Reset SERDES registers */
- ath9k_hw_configpcipowersave(sc->sc_ah, 0);
+ ath9k_hw_configpcipowersave(sc->sc_ah, 0, 0);
/*
* The basic interface to setting the hardware in a good
@@ -2166,11 +2162,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
} else
sc->rx.rxlink = NULL;
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(sc->sc_ah);
- ath9k_hw_configpcipowersave(sc->sc_ah, 1);
+ ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
sc->sc_flags |= SC_OP_INVALID;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index e5c29eb86e8..d83b77f821e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -676,8 +676,9 @@
#define AR_RC_HOSTIF 0x00000100
#define AR_WA 0x4004
+#define AR_WA_D3_L1_DISABLE (1 << 14)
#define AR9285_WA_DEFAULT 0x004a05cb
-#define AR9280_WA_DEFAULT 0x0040073f
+#define AR9280_WA_DEFAULT 0x0040073b
#define AR_WA_DEFAULT 0x0000073f
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 83e38134acc..54ea61c15d8 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -61,11 +61,28 @@ config B43_PCMCIA
If unsure, say N.
+config B43_SDIO
+ bool "Broadcom 43xx SDIO device support (EXPERIMENTAL)"
+ depends on B43 && SSB_SDIOHOST_POSSIBLE && EXPERIMENTAL
+ select SSB_SDIOHOST
+ ---help---
+ Broadcom 43xx device support for Soft-MAC SDIO devices.
+
+ With this config option you can drive Soft-MAC b43 cards with a
+ Secure Digital I/O interface.
+ This includes the WLAN daughter card found on the Nintendo Wii
+ video game console.
+ Note that this does not support Broadcom 43xx Full-MAC devices.
+
+ It's safe to select Y here, even if you don't have a B43 SDIO device.
+
+ If unsure, say N.
+
# Data transfers to the device via PIO
-# This is only needed on PCMCIA devices. All others can do DMA properly.
+# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
config B43_PIO
bool
- depends on B43 && (B43_PCMCIA || B43_FORCE_PIO)
+ depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
select SSB_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index da379f4b0c3..84772a2542d 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -16,6 +16,7 @@ b43-$(CONFIG_B43_PIO) += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
+b43-$(CONFIG_B43_SDIO) += sdio.o
b43-$(CONFIG_B43_DEBUG) += debugfs.o
obj-$(CONFIG_B43) += b43.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 09cfe68537b..660716214d4 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -607,86 +607,7 @@ struct b43_qos_params {
struct ieee80211_tx_queue_params p;
};
-struct b43_wldev;
-
-/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
-struct b43_wl {
- /* Pointer to the active wireless device on this chip */
- struct b43_wldev *current_dev;
- /* Pointer to the ieee80211 hardware data structure */
- struct ieee80211_hw *hw;
-
- /* Global driver mutex. Every operation must run with this mutex locked. */
- struct mutex mutex;
- /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ
- * handler, only. This basically is just the IRQ mask register. */
- spinlock_t hardirq_lock;
-
- /* The number of queues that were registered with the mac80211 subsystem
- * initially. This is a backup copy of hw->queues in case hw->queues has
- * to be dynamically lowered at runtime (Firmware does not support QoS).
- * hw->queues has to be restored to the original value before unregistering
- * from the mac80211 subsystem. */
- u16 mac80211_initially_registered_queues;
-
- /* R/W lock for data transmission.
- * Transmissions on 2+ queues can run concurrently, but somebody else
- * might sync with TX by write_lock_irqsave()'ing. */
- rwlock_t tx_lock;
- /* Lock for LEDs access. */
- spinlock_t leds_lock;
-
- /* We can only have one operating interface (802.11 core)
- * at a time. General information about this interface follows.
- */
-
- struct ieee80211_vif *vif;
- /* The MAC address of the operating interface. */
- u8 mac_addr[ETH_ALEN];
- /* Current BSSID */
- u8 bssid[ETH_ALEN];
- /* Interface type. (NL80211_IFTYPE_XXX) */
- int if_type;
- /* Is the card operating in AP, STA or IBSS mode? */
- bool operating;
- /* filter flags */
- unsigned int filter_flags;
- /* Stats about the wireless interface */
- struct ieee80211_low_level_stats ieee_stats;
-
-#ifdef CONFIG_B43_HWRNG
- struct hwrng rng;
- bool rng_initialized;
- char rng_name[30 + 1];
-#endif /* CONFIG_B43_HWRNG */
-
- /* List of all wireless devices on this chip */
- struct list_head devlist;
- u8 nr_devs;
-
- bool radiotap_enabled;
- bool radio_enabled;
-
- /* The beacon we are currently using (AP or IBSS mode). */
- struct sk_buff *current_beacon;
- bool beacon0_uploaded;
- bool beacon1_uploaded;
- bool beacon_templates_virgin; /* Never wrote the templates? */
- struct work_struct beacon_update_trigger;
-
- /* The current QOS parameters for the 4 queues. */
- struct b43_qos_params qos_params[4];
-
- /* Work for adjustment of the transmission power.
- * This is scheduled when we determine that the actual TX output
- * power doesn't match what we want. */
- struct work_struct txpower_adjust_work;
-
- /* Packet transmit work */
- struct work_struct tx_work;
- /* Queue of packets to be transmitted. */
- struct sk_buff_head tx_queue;
-};
+struct b43_wl;
/* The type of the firmware file. */
enum b43_firmware_file_type {
@@ -768,13 +689,10 @@ struct b43_wldev {
/* The device initialization status.
* Use b43_status() to query. */
atomic_t __init_status;
- /* Saved init status for handling suspend. */
- int suspend_init_status;
bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */
bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
bool radio_hw_enable; /* saved state of radio hardware enabled state */
- bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
bool qos_enabled; /* TRUE, if QoS is used. */
bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
@@ -794,12 +712,6 @@ struct b43_wldev {
/* Various statistics about the physical device. */
struct b43_stats stats;
- /* The device LEDs. */
- struct b43_led led_tx;
- struct b43_led led_rx;
- struct b43_led led_assoc;
- struct b43_led led_radio;
-
/* Reason code of the last interrupt. */
u32 irq_reason;
u32 dma_reason[6];
@@ -830,9 +742,104 @@ struct b43_wldev {
/* Debugging stuff follows. */
#ifdef CONFIG_B43_DEBUG
struct b43_dfsentry *dfsentry;
+ unsigned int irq_count;
+ unsigned int irq_bit_count[32];
+ unsigned int tx_count;
+ unsigned int rx_count;
#endif
};
+/*
+ * Include goes here to avoid a dependency problem.
+ * A better fix would be to integrate xmit.h into b43.h.
+ */
+#include "xmit.h"
+
+/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
+struct b43_wl {
+ /* Pointer to the active wireless device on this chip */
+ struct b43_wldev *current_dev;
+ /* Pointer to the ieee80211 hardware data structure */
+ struct ieee80211_hw *hw;
+
+ /* Global driver mutex. Every operation must run with this mutex locked. */
+ struct mutex mutex;
+ /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ
+ * handler, only. This basically is just the IRQ mask register. */
+ spinlock_t hardirq_lock;
+
+ /* The number of queues that were registered with the mac80211 subsystem
+ * initially. This is a backup copy of hw->queues in case hw->queues has
+ * to be dynamically lowered at runtime (Firmware does not support QoS).
+ * hw->queues has to be restored to the original value before unregistering
+ * from the mac80211 subsystem. */
+ u16 mac80211_initially_registered_queues;
+
+ /* We can only have one operating interface (802.11 core)
+ * at a time. General information about this interface follows.
+ */
+
+ struct ieee80211_vif *vif;
+ /* The MAC address of the operating interface. */
+ u8 mac_addr[ETH_ALEN];
+ /* Current BSSID */
+ u8 bssid[ETH_ALEN];
+ /* Interface type. (NL80211_IFTYPE_XXX) */
+ int if_type;
+ /* Is the card operating in AP, STA or IBSS mode? */
+ bool operating;
+ /* filter flags */
+ unsigned int filter_flags;
+ /* Stats about the wireless interface */
+ struct ieee80211_low_level_stats ieee_stats;
+
+#ifdef CONFIG_B43_HWRNG
+ struct hwrng rng;
+ bool rng_initialized;
+ char rng_name[30 + 1];
+#endif /* CONFIG_B43_HWRNG */
+
+ /* List of all wireless devices on this chip */
+ struct list_head devlist;
+ u8 nr_devs;
+
+ bool radiotap_enabled;
+ bool radio_enabled;
+
+ /* The beacon we are currently using (AP or IBSS mode). */
+ struct sk_buff *current_beacon;
+ bool beacon0_uploaded;
+ bool beacon1_uploaded;
+ bool beacon_templates_virgin; /* Never wrote the templates? */
+ struct work_struct beacon_update_trigger;
+
+ /* The current QOS parameters for the 4 queues. */
+ struct b43_qos_params qos_params[4];
+
+ /* Work for adjustment of the transmission power.
+ * This is scheduled when we determine that the actual TX output
+ * power doesn't match what we want. */
+ struct work_struct txpower_adjust_work;
+
+ /* Packet transmit work */
+ struct work_struct tx_work;
+ /* Queue of packets to be transmitted. */
+ struct sk_buff_head tx_queue;
+
+ /* The device LEDs. */
+ struct b43_leds leds;
+
+#ifdef CONFIG_B43_PIO
+ /*
+ * RX/TX header/tail buffers used by the frame transmit functions.
+ */
+ struct b43_rxhdr_fw4 rxhdr;
+ struct b43_txhdr txhdr;
+ u8 rx_tail[4];
+ u8 tx_tail[4];
+#endif /* CONFIG_B43_PIO */
+};
+
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
{
return hw->priv;
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 8f64943e3f6..80b19a44a40 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -689,6 +689,7 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0);
add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0);
+ add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0);
#undef add_dyn_dbg
}
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index e47b4b488b0..822aad8842f 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -13,6 +13,7 @@ enum b43_dyndbg { /* Dynamic debugging features */
B43_DBG_LO,
B43_DBG_FIRMWARE,
B43_DBG_KEYS,
+ B43_DBG_VERBOSESTATS,
__B43_NR_DYNDBG,
};
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index a467ee260a1..8701034569f 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1428,9 +1428,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
ring->nr_failed_tx_packets++;
ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
- ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
+ ieee80211_tx_status(dev->wl->hw, meta->skb);
- /* skb is freed by ieee80211_tx_status_irqsafe() */
+ /* skb is freed by ieee80211_tx_status() */
meta->skb = NULL;
} else {
/* No need to call free_descriptor_buffer here, as
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index c8b317094c3..1e8dba48800 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -34,57 +34,88 @@
static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
bool activelow)
{
- struct b43_wl *wl = dev->wl;
- unsigned long flags;
u16 ctl;
- spin_lock_irqsave(&wl->leds_lock, flags);
ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
if (activelow)
ctl &= ~(1 << led_index);
else
ctl |= (1 << led_index);
b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
- spin_unlock_irqrestore(&wl->leds_lock, flags);
}
static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index,
bool activelow)
{
- struct b43_wl *wl = dev->wl;
- unsigned long flags;
u16 ctl;
- spin_lock_irqsave(&wl->leds_lock, flags);
ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
if (activelow)
ctl |= (1 << led_index);
else
ctl &= ~(1 << led_index);
b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
- spin_unlock_irqrestore(&wl->leds_lock, flags);
}
-/* Callback from the LED subsystem. */
-static void b43_led_brightness_set(struct led_classdev *led_dev,
- enum led_brightness brightness)
+static void b43_led_update(struct b43_wldev *dev,
+ struct b43_led *led)
{
- struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
- struct b43_wldev *dev = led->dev;
bool radio_enabled;
+ bool turn_on;
- if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED))
+ if (!led->wl)
return;
- /* Checking the radio-enabled status here is slightly racy,
- * but we want to avoid the locking overhead and we don't care
- * whether the LED has the wrong state for a second. */
radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable);
- if (brightness == LED_OFF || !radio_enabled)
- b43_led_turn_off(dev, led->index, led->activelow);
+ /* The led->state read is racy, but we don't care. In case we raced
+ * with the brightness_set handler, we will be called again soon
+ * to fixup our state. */
+ if (radio_enabled)
+ turn_on = atomic_read(&led->state) != LED_OFF;
else
+ turn_on = 0;
+ if (turn_on == led->hw_state)
+ return;
+ led->hw_state = turn_on;
+
+ if (turn_on)
b43_led_turn_on(dev, led->index, led->activelow);
+ else
+ b43_led_turn_off(dev, led->index, led->activelow);
+}
+
+static void b43_leds_work(struct work_struct *work)
+{
+ struct b43_leds *leds = container_of(work, struct b43_leds, work);
+ struct b43_wl *wl = container_of(leds, struct b43_wl, leds);
+ struct b43_wldev *dev;
+
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED))
+ goto out_unlock;
+
+ b43_led_update(dev, &wl->leds.led_tx);
+ b43_led_update(dev, &wl->leds.led_rx);
+ b43_led_update(dev, &wl->leds.led_radio);
+ b43_led_update(dev, &wl->leds.led_assoc);
+
+out_unlock:
+ mutex_unlock(&wl->mutex);
+}
+
+/* Callback from the LED subsystem. */
+static void b43_led_brightness_set(struct led_classdev *led_dev,
+ enum led_brightness brightness)
+{
+ struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
+ struct b43_wl *wl = led->wl;
+
+ if (likely(!wl->leds.stop)) {
+ atomic_set(&led->state, brightness);
+ ieee80211_queue_work(wl->hw, &wl->leds.work);
+ }
}
static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
@@ -93,15 +124,15 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
{
int err;
- b43_led_turn_off(dev, led_index, activelow);
- if (led->dev)
+ if (led->wl)
return -EEXIST;
if (!default_trigger)
return -EINVAL;
- led->dev = dev;
+ led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
strncpy(led->name, name, sizeof(led->name));
+ atomic_set(&led->state, 0);
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
@@ -110,19 +141,19 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
err = led_classdev_register(dev->dev->dev, &led->led_dev);
if (err) {
b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
- led->dev = NULL;
+ led->wl = NULL;
return err;
}
+
return 0;
}
static void b43_unregister_led(struct b43_led *led)
{
- if (!led->dev)
+ if (!led->wl)
return;
led_classdev_unregister(&led->led_dev);
- b43_led_turn_off(led->dev, led->index, led->activelow);
- led->dev = NULL;
+ led->wl = NULL;
}
static void b43_map_led(struct b43_wldev *dev,
@@ -137,24 +168,20 @@ static void b43_map_led(struct b43_wldev *dev,
* generic LED triggers. */
switch (behaviour) {
case B43_LED_INACTIVE:
- break;
case B43_LED_OFF:
- b43_led_turn_off(dev, led_index, activelow);
- break;
case B43_LED_ON:
- b43_led_turn_on(dev, led_index, activelow);
break;
case B43_LED_ACTIVITY:
case B43_LED_TRANSFER:
case B43_LED_APTRANSFER:
snprintf(name, sizeof(name),
"b43-%s::tx", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_tx, name,
+ b43_register_led(dev, &dev->wl->leds.led_tx, name,
ieee80211_get_tx_led_name(hw),
led_index, activelow);
snprintf(name, sizeof(name),
"b43-%s::rx", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_rx, name,
+ b43_register_led(dev, &dev->wl->leds.led_rx, name,
ieee80211_get_rx_led_name(hw),
led_index, activelow);
break;
@@ -164,18 +191,15 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_MODE_BG:
snprintf(name, sizeof(name),
"b43-%s::radio", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_radio, name,
+ b43_register_led(dev, &dev->wl->leds.led_radio, name,
ieee80211_get_radio_led_name(hw),
led_index, activelow);
- /* Sync the RF-kill LED state with radio and switch states. */
- if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev))
- b43_led_turn_on(dev, led_index, activelow);
break;
case B43_LED_WEIRD:
case B43_LED_ASSOC:
snprintf(name, sizeof(name),
"b43-%s::assoc", wiphy_name(hw->wiphy));
- b43_register_led(dev, &dev->led_assoc, name,
+ b43_register_led(dev, &dev->wl->leds.led_assoc, name,
ieee80211_get_assoc_led_name(hw),
led_index, activelow);
break;
@@ -186,58 +210,150 @@ static void b43_map_led(struct b43_wldev *dev,
}
}
-void b43_leds_init(struct b43_wldev *dev)
+static void b43_led_get_sprominfo(struct b43_wldev *dev,
+ unsigned int led_index,
+ enum b43_led_behaviour *behaviour,
+ bool *activelow)
{
struct ssb_bus *bus = dev->dev->bus;
u8 sprom[4];
- int i;
- enum b43_led_behaviour behaviour;
- bool activelow;
sprom[0] = bus->sprom.gpio0;
sprom[1] = bus->sprom.gpio1;
sprom[2] = bus->sprom.gpio2;
sprom[3] = bus->sprom.gpio3;
- for (i = 0; i < 4; i++) {
- if (sprom[i] == 0xFF) {
- /* There is no LED information in the SPROM
- * for this LED. Hardcode it here. */
- activelow = 0;
- switch (i) {
- case 0:
- behaviour = B43_LED_ACTIVITY;
- activelow = 1;
- if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
- behaviour = B43_LED_RADIO_ALL;
- break;
- case 1:
- behaviour = B43_LED_RADIO_B;
- if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
- behaviour = B43_LED_ASSOC;
- break;
- case 2:
- behaviour = B43_LED_RADIO_A;
- break;
- case 3:
- behaviour = B43_LED_OFF;
- break;
- default:
- B43_WARN_ON(1);
- return;
- }
+ if (sprom[led_index] == 0xFF) {
+ /* There is no LED information in the SPROM
+ * for this LED. Hardcode it here. */
+ *activelow = 0;
+ switch (led_index) {
+ case 0:
+ *behaviour = B43_LED_ACTIVITY;
+ *activelow = 1;
+ if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
+ *behaviour = B43_LED_RADIO_ALL;
+ break;
+ case 1:
+ *behaviour = B43_LED_RADIO_B;
+ if (bus->boardinfo.vendor == PCI_VENDOR_ID_ASUSTEK)
+ *behaviour = B43_LED_ASSOC;
+ break;
+ case 2:
+ *behaviour = B43_LED_RADIO_A;
+ break;
+ case 3:
+ *behaviour = B43_LED_OFF;
+ break;
+ default:
+ B43_WARN_ON(1);
+ return;
+ }
+ } else {
+ *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
+ *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
+ }
+}
+
+void b43_leds_init(struct b43_wldev *dev)
+{
+ struct b43_led *led;
+ unsigned int i;
+ enum b43_led_behaviour behaviour;
+ bool activelow;
+
+ /* Sync the RF-kill LED state (if we have one) with radio and switch states. */
+ led = &dev->wl->leds.led_radio;
+ if (led->wl) {
+ if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
+ b43_led_turn_on(dev, led->index, led->activelow);
+ led->hw_state = 1;
+ atomic_set(&led->state, 1);
} else {
- behaviour = sprom[i] & B43_LED_BEHAVIOUR;
- activelow = !!(sprom[i] & B43_LED_ACTIVELOW);
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
}
- b43_map_led(dev, i, behaviour, activelow);
}
+
+ /* Initialize TX/RX/ASSOC leds */
+ led = &dev->wl->leds.led_tx;
+ if (led->wl) {
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
+ }
+ led = &dev->wl->leds.led_rx;
+ if (led->wl) {
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
+ }
+ led = &dev->wl->leds.led_assoc;
+ if (led->wl) {
+ b43_led_turn_off(dev, led->index, led->activelow);
+ led->hw_state = 0;
+ atomic_set(&led->state, 0);
+ }
+
+ /* Initialize other LED states. */
+ for (i = 0; i < B43_MAX_NR_LEDS; i++) {
+ b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
+ switch (behaviour) {
+ case B43_LED_OFF:
+ b43_led_turn_off(dev, i, activelow);
+ break;
+ case B43_LED_ON:
+ b43_led_turn_on(dev, i, activelow);
+ break;
+ default:
+ /* Leave others as-is. */
+ break;
+ }
+ }
+
+ dev->wl->leds.stop = 0;
}
void b43_leds_exit(struct b43_wldev *dev)
{
- b43_unregister_led(&dev->led_tx);
- b43_unregister_led(&dev->led_rx);
- b43_unregister_led(&dev->led_assoc);
- b43_unregister_led(&dev->led_radio);
+ struct b43_leds *leds = &dev->wl->leds;
+
+ b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow);
+ b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow);
+ b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow);
+ b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow);
+}
+
+void b43_leds_stop(struct b43_wldev *dev)
+{
+ struct b43_leds *leds = &dev->wl->leds;
+
+ leds->stop = 1;
+ cancel_work_sync(&leds->work);
+}
+
+void b43_leds_register(struct b43_wldev *dev)
+{
+ unsigned int i;
+ enum b43_led_behaviour behaviour;
+ bool activelow;
+
+ INIT_WORK(&dev->wl->leds.work, b43_leds_work);
+
+ /* Register the LEDs to the LED subsystem. */
+ for (i = 0; i < B43_MAX_NR_LEDS; i++) {
+ b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
+ b43_map_led(dev, i, behaviour, activelow);
+ }
+}
+
+void b43_leds_unregister(struct b43_wl *wl)
+{
+ struct b43_leds *leds = &wl->leds;
+
+ b43_unregister_led(&leds->led_tx);
+ b43_unregister_led(&leds->led_rx);
+ b43_unregister_led(&leds->led_assoc);
+ b43_unregister_led(&leds->led_radio);
}
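
For reference, the SPROM bytes consumed by b43_led_get_sprominfo() above keep the LED behaviour in the low seven bits (B43_LED_BEHAVIOUR) and the active-low flag in bit 7 (B43_LED_ACTIVELOW), with 0xFF meaning "no information, fall back to the hardcoded board defaults". A minimal standalone sketch of that decoding (plain userspace C, not part of the patch; the two constants are copied from leds.h below):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define B43_LED_BEHAVIOUR	0x7F	/* low 7 bits: behaviour code */
	#define B43_LED_ACTIVELOW	0x80	/* bit 7: LED is active-low */

	/* Decode one SPROM GPIO byte; returns false for 0xFF ("no info"). */
	static bool decode_sprom_led(uint8_t sprom, unsigned int *behaviour,
				     bool *activelow)
	{
		if (sprom == 0xFF)
			return false;
		*behaviour = sprom & B43_LED_BEHAVIOUR;
		*activelow = !!(sprom & B43_LED_ACTIVELOW);
		return true;
	}

	int main(void)
	{
		unsigned int behaviour;
		bool activelow;

		if (decode_sprom_led(0x83, &behaviour, &activelow))
			printf("behaviour=%u activelow=%d\n", behaviour, activelow);
		return 0;
	}
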
diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h
index b8b1dd52124..4c56187810f 100644
--- a/drivers/net/wireless/b43/leds.h
+++ b/drivers/net/wireless/b43/leds.h
@@ -7,12 +7,13 @@ struct b43_wldev;
#include <linux/types.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#define B43_LED_MAX_NAME_LEN 31
struct b43_led {
- struct b43_wldev *dev;
+ struct b43_wl *wl;
/* The LED class device */
struct led_classdev led_dev;
/* The index number of the LED. */
@@ -22,8 +23,24 @@ struct b43_led {
bool activelow;
/* The unique name string for this LED device. */
char name[B43_LED_MAX_NAME_LEN + 1];
+ /* The current status of the LED. This is updated locklessly. */
+ atomic_t state;
+ /* The active state in hardware. */
+ bool hw_state;
};
+struct b43_leds {
+ struct b43_led led_tx;
+ struct b43_led led_rx;
+ struct b43_led led_radio;
+ struct b43_led led_assoc;
+
+ bool stop;
+ struct work_struct work;
+};
+
+#define B43_MAX_NR_LEDS 4
+
#define B43_LED_BEHAVIOUR 0x7F
#define B43_LED_ACTIVELOW 0x80
/* LED behaviour values */
@@ -42,23 +59,35 @@ enum b43_led_behaviour {
B43_LED_INACTIVE,
};
+void b43_leds_register(struct b43_wldev *dev);
+void b43_leds_unregister(struct b43_wl *wl);
void b43_leds_init(struct b43_wldev *dev);
void b43_leds_exit(struct b43_wldev *dev);
+void b43_leds_stop(struct b43_wldev *dev);
#else /* CONFIG_B43_LEDS */
/* LED support disabled */
-struct b43_led {
+struct b43_leds {
/* empty */
};
+static inline void b43_leds_register(struct b43_wldev *dev)
+{
+}
+static inline void b43_leds_unregister(struct b43_wl *wl)
+{
+}
static inline void b43_leds_init(struct b43_wldev *dev)
{
}
static inline void b43_leds_exit(struct b43_wldev *dev)
{
}
+static inline void b43_leds_stop(struct b43_wldev *dev)
+{
+}
#endif /* CONFIG_B43_LEDS */
#endif /* B43_LEDS_H_ */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e789792a36b..df6b26a0c05 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -8,6 +8,9 @@
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
+ SDIO support
+ Copyright (c) 2009 Albert Herranz <albert_herranz@yahoo.es>
+
Some parts of the code in this file are derived from the ipw2200
driver Copyright(c) 2003 - 2004 Intel Corporation.
@@ -53,6 +56,8 @@
#include "xmit.h"
#include "lo.h"
#include "pcmcia.h"
+#include "sdio.h"
+#include <linux/mmc/sdio_func.h>
MODULE_DESCRIPTION("Broadcom B43 wireless driver");
MODULE_AUTHOR("Martin Langer");
@@ -1587,7 +1592,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work)
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
- if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) {
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
/* wl->mutex is enough. */
b43_do_beacon_update_trigger_work(dev);
mmiowb();
@@ -1825,6 +1830,16 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
/* Re-enable interrupts on the device by restoring the current interrupt mask. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
+
+#if B43_DEBUG
+ if (b43_debug(dev, B43_DBG_VERBOSESTATS)) {
+ dev->irq_count++;
+ for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) {
+ if (reason & (1 << i))
+ dev->irq_bit_count[i]++;
+ }
+ }
+#endif
}
/* Interrupt thread handler. Handles device interrupts in thread context. */
@@ -1905,6 +1920,21 @@ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id)
return ret;
}
+/* SDIO interrupt handler. This runs in process context. */
+static void b43_sdio_interrupt_handler(struct b43_wldev *dev)
+{
+ struct b43_wl *wl = dev->wl;
+ irqreturn_t ret;
+
+ mutex_lock(&wl->mutex);
+
+ ret = b43_do_interrupt(dev);
+ if (ret == IRQ_WAKE_THREAD)
+ b43_do_interrupt_thread(dev);
+
+ mutex_unlock(&wl->mutex);
+}
+
void b43_do_release_fw(struct b43_firmware_file *fw)
{
release_firmware(fw->data);
@@ -2645,6 +2675,20 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
cfp_pretbtt = 50;
}
b43_write16(dev, 0x612, cfp_pretbtt);
+
+ /* FIXME: We don't currently implement the PMQ mechanism,
+ * so always disable it. If we want to implement PMQ,
+ * we need to enable it here (clear DISCPMQ) in AP mode.
+ */
+ if (0 /* ctl & B43_MACCTL_AP */) {
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL)
+ & ~B43_MACCTL_DISCPMQ);
+ } else {
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL)
+ | B43_MACCTL_DISCPMQ);
+ }
}
static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
@@ -2873,6 +2917,27 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
wmb();
+
+#if B43_DEBUG
+ if (b43_debug(dev, B43_DBG_VERBOSESTATS)) {
+ unsigned int i;
+
+ b43dbg(dev->wl, "Stats: %7u IRQs/sec, %7u TX/sec, %7u RX/sec\n",
+ dev->irq_count / 15,
+ dev->tx_count / 15,
+ dev->rx_count / 15);
+ dev->irq_count = 0;
+ dev->tx_count = 0;
+ dev->rx_count = 0;
+ for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) {
+ if (dev->irq_bit_count[i]) {
+ b43dbg(dev->wl, "Stats: %7u IRQ-%02u/sec (0x%08X)\n",
+ dev->irq_bit_count[i] / 15, i, (1 << i));
+ dev->irq_bit_count[i] = 0;
+ }
+ }
+ }
+#endif
}
static void do_periodic_work(struct b43_wldev *dev)
@@ -3002,14 +3067,18 @@ static void b43_security_init(struct b43_wldev *dev)
static int b43_rng_read(struct hwrng *rng, u32 *data)
{
struct b43_wl *wl = (struct b43_wl *)rng->priv;
+ struct b43_wldev *dev;
+ int count = -ENODEV;
- /* FIXME: We need to take wl->mutex here to make sure the device
- * is not going away from under our ass. However it could deadlock
- * with hwrng internal locking. */
-
- *data = b43_read16(wl->current_dev, B43_MMIO_RNG);
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (likely(dev && b43_status(dev) >= B43_STAT_INITIALIZED)) {
+ *data = b43_read16(dev, B43_MMIO_RNG);
+ count = sizeof(u16);
+ }
+ mutex_unlock(&wl->mutex);
- return (sizeof(u16));
+ return count;
}
#endif /* CONFIG_B43_HWRNG */
@@ -3068,6 +3137,9 @@ static void b43_tx_work(struct work_struct *work)
dev_kfree_skb(skb); /* Drop it */
}
+#if B43_DEBUG
+ dev->tx_count++;
+#endif
mutex_unlock(&wl->mutex);
}
@@ -3802,6 +3874,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
{
struct b43_wl *wl = dev->wl;
struct b43_wldev *orig_dev;
+ u32 mask;
redo:
if (!dev || b43_status(dev) < B43_STAT_STARTED)
@@ -3820,7 +3893,7 @@ redo:
/* Disable interrupts on the device. */
b43_set_status(dev, B43_STAT_INITIALIZED);
- if (0 /*FIXME dev->dev->bus->bustype == SSB_BUSTYPE_SDIO*/) {
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
/* wl->mutex is locked. That is enough. */
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0);
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
@@ -3830,10 +3903,15 @@ redo:
b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */
spin_unlock_irq(&wl->hardirq_lock);
}
- /* Synchronize the interrupt handlers. Unlock to avoid deadlocks. */
+ /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
orig_dev = dev;
mutex_unlock(&wl->mutex);
- synchronize_irq(dev->dev->irq);
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ b43_sdio_free_irq(dev);
+ } else {
+ synchronize_irq(dev->dev->irq);
+ free_irq(dev->dev->irq, dev);
+ }
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev)
@@ -3843,14 +3921,15 @@ redo:
goto redo;
return dev;
}
- B43_WARN_ON(b43_read32(dev, B43_MMIO_GEN_IRQ_MASK));
+ mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
+ B43_WARN_ON(mask != 0xFFFFFFFF && mask);
/* Drain the TX queue */
while (skb_queue_len(&wl->tx_queue))
dev_kfree_skb(skb_dequeue(&wl->tx_queue));
b43_mac_suspend(dev);
- free_irq(dev->dev->irq, dev);
+ b43_leds_exit(dev);
b43dbg(wl, "Wireless interface stopped\n");
return dev;
@@ -3864,12 +3943,20 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED);
drain_txstatus_queue(dev);
- err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
- b43_interrupt_thread_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (err) {
- b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq);
- goto out;
+ if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
+ err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler);
+ if (err) {
+ b43err(dev->wl, "Cannot request SDIO IRQ\n");
+ goto out;
+ }
+ } else {
+ err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler,
+ b43_interrupt_thread_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (err) {
+ b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq);
+ goto out;
+ }
}
/* We are ready to run. */
@@ -3882,8 +3969,10 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
/* Start maintainance work */
b43_periodic_tasks_setup(dev);
+ b43_leds_init(dev);
+
b43dbg(dev->wl, "Wireless interface started\n");
- out:
+out:
return err;
}
@@ -4160,10 +4249,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
macctl |= B43_MACCTL_PSM_JMP0;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
- if (!dev->suspend_in_progress) {
- b43_leds_exit(dev);
- b43_rng_exit(dev->wl);
- }
b43_dma_free(dev);
b43_pio_free(dev);
b43_chip_exit(dev);
@@ -4180,7 +4265,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
/* Initialize a wireless core */
static int b43_wireless_core_init(struct b43_wldev *dev)
{
- struct b43_wl *wl = dev->wl;
struct ssb_bus *bus = dev->dev->bus;
struct ssb_sprom *sprom = &bus->sprom;
struct b43_phy *phy = &dev->phy;
@@ -4264,7 +4348,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
/* Maximum Contention Window */
b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
- if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || B43_FORCE_PIO) {
+ if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
+ (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
+ B43_FORCE_PIO) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4280,15 +4366,13 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW));
b43_upload_card_macaddress(dev);
b43_security_init(dev);
- if (!dev->suspend_in_progress)
- b43_rng_init(wl);
+
+ ieee80211_wake_queues(dev->wl->hw);
ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_INITIALIZED);
- if (!dev->suspend_in_progress)
- b43_leds_init(dev);
out:
return err;
@@ -4417,6 +4501,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&(wl->beacon_update_trigger));
+ wiphy_rfkill_stop_polling(hw->wiphy);
mutex_lock(&wl->mutex);
if (b43_status(dev) >= B43_STAT_STARTED) {
dev = b43_wireless_core_stop(dev);
@@ -4837,7 +4922,6 @@ static int b43_wireless_init(struct ssb_device *dev)
/* Initialize struct b43_wl */
wl->hw = hw;
- spin_lock_init(&wl->leds_lock);
mutex_init(&wl->mutex);
spin_lock_init(&wl->hardirq_lock);
INIT_LIST_HEAD(&wl->devlist);
@@ -4878,6 +4962,8 @@ static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id)
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
+ b43_leds_register(wl->current_dev);
+ b43_rng_init(wl);
}
out:
@@ -4906,12 +4992,15 @@ static void b43_remove(struct ssb_device *dev)
* might have modified it. Restoring is important, so the networking
* stack can properly free resources. */
wl->hw->queues = wl->mac80211_initially_registered_queues;
+ b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
b43_one_core_detach(dev);
if (list_empty(&wl->devlist)) {
+ b43_rng_exit(wl);
+ b43_leds_unregister(wl);
/* Last core on the chip unregistered.
* We can destroy common struct b43_wl.
*/
@@ -4929,80 +5018,17 @@ void b43_controller_restart(struct b43_wldev *dev, const char *reason)
ieee80211_queue_work(dev->wl->hw, &dev->restart_work);
}
-#ifdef CONFIG_PM
-
-static int b43_suspend(struct ssb_device *dev, pm_message_t state)
-{
- struct b43_wldev *wldev = ssb_get_drvdata(dev);
- struct b43_wl *wl = wldev->wl;
-
- b43dbg(wl, "Suspending...\n");
-
- mutex_lock(&wl->mutex);
- wldev->suspend_in_progress = true;
- wldev->suspend_init_status = b43_status(wldev);
- if (wldev->suspend_init_status >= B43_STAT_STARTED)
- wldev = b43_wireless_core_stop(wldev);
- if (wldev && wldev->suspend_init_status >= B43_STAT_INITIALIZED)
- b43_wireless_core_exit(wldev);
- mutex_unlock(&wl->mutex);
-
- b43dbg(wl, "Device suspended.\n");
-
- return 0;
-}
-
-static int b43_resume(struct ssb_device *dev)
-{
- struct b43_wldev *wldev = ssb_get_drvdata(dev);
- struct b43_wl *wl = wldev->wl;
- int err = 0;
-
- b43dbg(wl, "Resuming...\n");
-
- mutex_lock(&wl->mutex);
- if (wldev->suspend_init_status >= B43_STAT_INITIALIZED) {
- err = b43_wireless_core_init(wldev);
- if (err) {
- b43err(wl, "Resume failed at core init\n");
- goto out;
- }
- }
- if (wldev->suspend_init_status >= B43_STAT_STARTED) {
- err = b43_wireless_core_start(wldev);
- if (err) {
- b43_leds_exit(wldev);
- b43_rng_exit(wldev->wl);
- b43_wireless_core_exit(wldev);
- b43err(wl, "Resume failed at core start\n");
- goto out;
- }
- }
- b43dbg(wl, "Device resumed.\n");
- out:
- wldev->suspend_in_progress = false;
- mutex_unlock(&wl->mutex);
- return err;
-}
-
-#else /* CONFIG_PM */
-# define b43_suspend NULL
-# define b43_resume NULL
-#endif /* CONFIG_PM */
-
static struct ssb_driver b43_ssb_driver = {
.name = KBUILD_MODNAME,
.id_table = b43_ssb_tbl,
.probe = b43_probe,
.remove = b43_remove,
- .suspend = b43_suspend,
- .resume = b43_resume,
};
static void b43_print_driverinfo(void)
{
const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
- *feat_leds = "";
+ *feat_leds = "", *feat_sdio = "";
#ifdef CONFIG_B43_PCI_AUTOSELECT
feat_pci = "P";
@@ -5016,11 +5042,14 @@ static void b43_print_driverinfo(void)
#ifdef CONFIG_B43_LEDS
feat_leds = "L";
#endif
+#ifdef CONFIG_B43_SDIO
+ feat_sdio = "S";
+#endif
printk(KERN_INFO "Broadcom 43xx driver loaded "
- "[ Features: %s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s%s, Firmware-ID: "
B43_SUPPORTED_FIRMWARE_ID " ]\n",
feat_pci, feat_pcmcia, feat_nphy,
- feat_leds);
+ feat_leds, feat_sdio);
}
static int __init b43_init(void)
@@ -5031,13 +5060,18 @@ static int __init b43_init(void)
err = b43_pcmcia_init();
if (err)
goto err_dfs_exit;
- err = ssb_driver_register(&b43_ssb_driver);
+ err = b43_sdio_init();
if (err)
goto err_pcmcia_exit;
+ err = ssb_driver_register(&b43_ssb_driver);
+ if (err)
+ goto err_sdio_exit;
b43_print_driverinfo();
return err;
+err_sdio_exit:
+ b43_sdio_exit();
err_pcmcia_exit:
b43_pcmcia_exit();
err_dfs_exit:
@@ -5048,6 +5082,7 @@ err_dfs_exit:
static void __exit b43_exit(void)
{
ssb_driver_unregister(&b43_ssb_driver);
+ b43_sdio_exit();
b43_pcmcia_exit();
b43_debugfs_exit();
}
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e02d969f68..1e318d815a5 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -2228,6 +2228,16 @@ static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
return B43_TXPWR_RES_DONE;
}
+void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
+{
+ if (on) {
+ b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8);
+ } else {
+ b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0x0007);
+ b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x0007);
+ }
+}
+
const struct b43_phy_operations b43_phyops_lp = {
.allocate = b43_lpphy_op_allocate,
.free = b43_lpphy_op_free,
@@ -2239,7 +2249,7 @@ const struct b43_phy_operations b43_phyops_lp = {
.radio_read = b43_lpphy_op_radio_read,
.radio_write = b43_lpphy_op_radio_write,
.software_rfkill = b43_lpphy_op_software_rfkill,
- .switch_analog = b43_phyop_switch_analog_generic,
+ .switch_analog = b43_lpphy_op_switch_analog,
.switch_channel = b43_lpphy_op_switch_channel,
.get_default_chan = b43_lpphy_op_get_default_chan,
.set_rx_antenna = b43_lpphy_op_set_rx_antenna,
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 3498b68385e..9b904440021 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -30,6 +30,7 @@
#include "xmit.h"
#include <linux/delay.h>
+#include <linux/sched.h>
static u16 generate_cookie(struct b43_pio_txqueue *q,
@@ -331,6 +332,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
unsigned int data_len)
{
struct b43_wldev *dev = q->dev;
+ struct b43_wl *wl = dev->wl;
const u8 *data = _data;
ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
@@ -343,7 +345,11 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
/* Write the last byte. */
ctl &= ~B43_PIO_TXCTL_WRITEHI;
b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
- b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]);
+ wl->tx_tail[0] = data[data_len - 1];
+ wl->tx_tail[1] = 0;
+ ssb_block_write(dev->dev, wl->tx_tail, 2,
+ q->mmio_base + B43_PIO_TXDATA,
+ sizeof(u16));
}
return ctl;
@@ -376,6 +382,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
unsigned int data_len)
{
struct b43_wldev *dev = q->dev;
+ struct b43_wl *wl = dev->wl;
const u8 *data = _data;
ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
@@ -386,26 +393,33 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
q->mmio_base + B43_PIO8_TXDATA,
sizeof(u32));
if (data_len & 3) {
- u32 value = 0;
-
+ wl->tx_tail[3] = 0;
/* Write the last few bytes. */
ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
B43_PIO8_TXCTL_24_31);
- data = &(data[data_len - 1]);
switch (data_len & 3) {
case 3:
- ctl |= B43_PIO8_TXCTL_16_23;
- value |= (u32)(*data) << 16;
- data--;
+ ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
+ wl->tx_tail[0] = data[data_len - 3];
+ wl->tx_tail[1] = data[data_len - 2];
+ wl->tx_tail[2] = data[data_len - 1];
+ break;
case 2:
ctl |= B43_PIO8_TXCTL_8_15;
- value |= (u32)(*data) << 8;
- data--;
+ wl->tx_tail[0] = data[data_len - 2];
+ wl->tx_tail[1] = data[data_len - 1];
+ wl->tx_tail[2] = 0;
+ break;
case 1:
- value |= (u32)(*data);
+ wl->tx_tail[0] = data[data_len - 1];
+ wl->tx_tail[1] = 0;
+ wl->tx_tail[2] = 0;
+ break;
}
b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
- b43_piotx_write32(q, B43_PIO8_TXDATA, value);
+ ssb_block_write(dev->dev, wl->tx_tail, 4,
+ q->mmio_base + B43_PIO8_TXDATA,
+ sizeof(u32));
}
return ctl;
@@ -435,8 +449,9 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
static int pio_tx_frame(struct b43_pio_txqueue *q,
struct sk_buff *skb)
{
+ struct b43_wldev *dev = q->dev;
+ struct b43_wl *wl = dev->wl;
struct b43_pio_txpacket *pack;
- struct b43_txhdr txhdr;
u16 cookie;
int err;
unsigned int hdrlen;
@@ -447,8 +462,8 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
struct b43_pio_txpacket, list);
cookie = generate_cookie(q, pack);
- hdrlen = b43_txhdr_size(q->dev);
- err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb,
+ hdrlen = b43_txhdr_size(dev);
+ err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb,
info, cookie);
if (err)
return err;
@@ -456,15 +471,15 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
/* Tell the firmware about the cookie of the last
* mcast frame, so it can clear the more-data bit in it. */
- b43_shm_write16(q->dev, B43_SHM_SHARED,
+ b43_shm_write16(dev, B43_SHM_SHARED,
B43_SHM_SH_MCASTCOOKIE, cookie);
}
pack->skb = skb;
if (q->rev >= 8)
- pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen);
+ pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
else
- pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen);
+ pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen);
/* Remove it from the list of available packet slots.
* It will be put back when we receive the status report. */
@@ -574,7 +589,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
q->buffer_used -= total_len;
q->free_packet_slots += 1;
- ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb);
+ ieee80211_tx_status(dev->wl->hw, pack->skb);
pack->skb = NULL;
list_add(&pack->list, &q->packets_list);
@@ -604,14 +619,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev,
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
struct b43_wldev *dev = q->dev;
- struct b43_rxhdr_fw4 rxhdr;
+ struct b43_wl *wl = dev->wl;
u16 len;
u32 macstat;
unsigned int i, padding;
struct sk_buff *skb;
const char *err_msg = NULL;
- memset(&rxhdr, 0, sizeof(rxhdr));
+ memset(&wl->rxhdr, 0, sizeof(wl->rxhdr));
/* Check if we have data and wait for it to get ready. */
if (q->rev >= 8) {
@@ -649,16 +664,16 @@ data_ready:
/* Get the preamble (RX header) */
if (q->rev >= 8) {
- ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
+ ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
} else {
- ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
+ ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr),
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
}
/* Sanity checks. */
- len = le16_to_cpu(rxhdr.frame_len);
+ len = le16_to_cpu(wl->rxhdr.frame_len);
if (unlikely(len > 0x700)) {
err_msg = "len > 0x700";
goto rx_error;
@@ -668,7 +683,7 @@ data_ready:
goto rx_error;
}
- macstat = le32_to_cpu(rxhdr.mac_status);
+ macstat = le32_to_cpu(wl->rxhdr.mac_status);
if (macstat & B43_RX_MAC_FCSERR) {
if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
/* Drop frames with failed FCS. */
@@ -693,21 +708,23 @@ data_ready:
q->mmio_base + B43_PIO8_RXDATA,
sizeof(u32));
if (len & 3) {
- u32 value;
- char *data;
-
/* Read the last few bytes. */
- value = b43_piorx_read32(q, B43_PIO8_RXDATA);
- data = &(skb->data[len + padding - 1]);
+ ssb_block_read(dev->dev, wl->rx_tail, 4,
+ q->mmio_base + B43_PIO8_RXDATA,
+ sizeof(u32));
switch (len & 3) {
case 3:
- *data = (value >> 16);
- data--;
+ skb->data[len + padding - 3] = wl->rx_tail[0];
+ skb->data[len + padding - 2] = wl->rx_tail[1];
+ skb->data[len + padding - 1] = wl->rx_tail[2];
+ break;
case 2:
- *data = (value >> 8);
- data--;
+ skb->data[len + padding - 2] = wl->rx_tail[0];
+ skb->data[len + padding - 1] = wl->rx_tail[1];
+ break;
case 1:
- *data = value;
+ skb->data[len + padding - 1] = wl->rx_tail[0];
+ break;
}
}
} else {
@@ -715,15 +732,15 @@ data_ready:
q->mmio_base + B43_PIO_RXDATA,
sizeof(u16));
if (len & 1) {
- u16 value;
-
/* Read the last byte. */
- value = b43_piorx_read16(q, B43_PIO_RXDATA);
- skb->data[len + padding - 1] = value;
+ ssb_block_read(dev->dev, wl->rx_tail, 2,
+ q->mmio_base + B43_PIO_RXDATA,
+ sizeof(u16));
+ skb->data[len + padding - 1] = wl->rx_tail[0];
}
}
- b43_rx(q->dev, skb, &rxhdr);
+ b43_rx(q->dev, skb, &wl->rxhdr);
return 1;
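
The tx_write_2byte_queue()/tx_write_4byte_queue() hunks above stop composing the trailing bytes in a stack variable and instead copy them into the preallocated wl->tx_tail buffer, so the final transfer can go out as one aligned ssb_block_write(). A standalone sketch of just that tail packing (the TXCTL bit handling from the hunk is left out), assuming nothing beyond the layout shown:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Copy the last (len & 3) bytes of a frame into a zero-padded 4-byte
	 * tail buffer, as the PIO TX path does before its final block write.
	 * Returns how many of the tail bytes are real data. */
	static unsigned int pack_tx_tail(const uint8_t *data, size_t len,
					 uint8_t tail[4])
	{
		unsigned int rem = len & 3;

		memset(tail, 0, 4);
		if (rem)
			memcpy(tail, data + (len - rem), rem);
		return rem;
	}

	int main(void)
	{
		const uint8_t frame[] = { 1, 2, 3, 4, 5, 6, 7 };
		uint8_t tail[4];
		unsigned int rem = pack_tx_tail(frame, sizeof(frame), tail);

		/* Prints: rem=3 tail=05 06 07 00 */
		printf("rem=%u tail=%02x %02x %02x %02x\n",
		       rem, tail[0], tail[1], tail[2], tail[3]);
		return 0;
	}
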
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 31e55999893..7a3218c5ba7 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -28,7 +28,7 @@
/* Returns TRUE, if the radio is enabled in hardware. */
bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
{
- if (dev->phy.rev >= 3) {
+ if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) {
if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
& B43_MMIO_RADIO_HWENABLED_HI_MASK))
return 1;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
new file mode 100644
index 00000000000..0d3ac64147a
--- /dev/null
+++ b/drivers/net/wireless/b43/sdio.c
@@ -0,0 +1,202 @@
+/*
+ * Broadcom B43 wireless driver
+ *
+ * SDIO over Sonics Silicon Backplane bus glue for b43.
+ *
+ * Copyright (C) 2009 Albert Herranz
+ * Copyright (C) 2009 Michael Buesch <mb@bu3sch.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/ssb/ssb.h>
+
+#include "sdio.h"
+#include "b43.h"
+
+
+#define HNBU_CHIPID 0x01 /* vendor & device id */
+
+#define B43_SDIO_BLOCK_SIZE 64 /* rx fifo max size in bytes */
+
+
+static const struct b43_sdio_quirk {
+ u16 vendor;
+ u16 device;
+ unsigned int quirks;
+} b43_sdio_quirks[] = {
+ { 0x14E4, 0x4318, SSB_QUIRK_SDIO_READ_AFTER_WRITE32, },
+ { },
+};
+
+
+static unsigned int b43_sdio_get_quirks(u16 vendor, u16 device)
+{
+ const struct b43_sdio_quirk *q;
+
+ for (q = b43_sdio_quirks; q->quirks; q++) {
+ if (vendor == q->vendor && device == q->device)
+ return q->quirks;
+ }
+
+ return 0;
+}
+
+static void b43_sdio_interrupt_dispatcher(struct sdio_func *func)
+{
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+ struct b43_wldev *dev = sdio->irq_handler_opaque;
+
+ if (unlikely(b43_status(dev) < B43_STAT_STARTED))
+ return;
+
+ sdio_release_host(func);
+ sdio->irq_handler(dev);
+ sdio_claim_host(func);
+}
+
+int b43_sdio_request_irq(struct b43_wldev *dev,
+ void (*handler)(struct b43_wldev *dev))
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct sdio_func *func = bus->host_sdio;
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+ int err;
+
+ sdio->irq_handler_opaque = dev;
+ sdio->irq_handler = handler;
+ sdio_claim_host(func);
+ err = sdio_claim_irq(func, b43_sdio_interrupt_dispatcher);
+ sdio_release_host(func);
+
+ return err;
+}
+
+void b43_sdio_free_irq(struct b43_wldev *dev)
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct sdio_func *func = bus->host_sdio;
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+
+ sdio_claim_host(func);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+ sdio->irq_handler_opaque = NULL;
+ sdio->irq_handler = NULL;
+}
+
+static int b43_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct b43_sdio *sdio;
+ struct sdio_func_tuple *tuple;
+ u16 vendor = 0, device = 0;
+ int error;
+
+ /* Look for the card chip identifier. */
+ tuple = func->tuples;
+ while (tuple) {
+ switch (tuple->code) {
+ case 0x80:
+ switch (tuple->data[0]) {
+ case HNBU_CHIPID:
+ if (tuple->size != 5)
+ break;
+ vendor = tuple->data[1] | (tuple->data[2]<<8);
+ device = tuple->data[3] | (tuple->data[4]<<8);
+ dev_info(&func->dev, "Chip ID %04x:%04x\n",
+ vendor, device);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ tuple = tuple->next;
+ }
+ if (!vendor || !device) {
+ error = -ENODEV;
+ goto out;
+ }
+
+ sdio_claim_host(func);
+ error = sdio_set_block_size(func, B43_SDIO_BLOCK_SIZE);
+ if (error) {
+ dev_err(&func->dev, "failed to set block size to %u bytes,"
+ " error %d\n", B43_SDIO_BLOCK_SIZE, error);
+ goto err_release_host;
+ }
+ error = sdio_enable_func(func);
+ if (error) {
+ dev_err(&func->dev, "failed to enable func, error %d\n", error);
+ goto err_release_host;
+ }
+ sdio_release_host(func);
+
+ sdio = kzalloc(sizeof(*sdio), GFP_KERNEL);
+ if (!sdio) {
+ error = -ENOMEM;
+ dev_err(&func->dev, "failed to allocate ssb bus\n");
+ goto err_disable_func;
+ }
+ error = ssb_bus_sdiobus_register(&sdio->ssb, func,
+ b43_sdio_get_quirks(vendor, device));
+ if (error) {
+ dev_err(&func->dev, "failed to register ssb sdio bus,"
+ " error %d\n", error);
+ goto err_free_ssb;
+ }
+ sdio_set_drvdata(func, sdio);
+
+ return 0;
+
+err_free_ssb:
+ kfree(sdio);
+err_disable_func:
+ sdio_disable_func(func);
+err_release_host:
+ sdio_release_host(func);
+out:
+ return error;
+}
+
+static void b43_sdio_remove(struct sdio_func *func)
+{
+ struct b43_sdio *sdio = sdio_get_drvdata(func);
+
+ ssb_bus_unregister(&sdio->ssb);
+ sdio_disable_func(func);
+ kfree(sdio);
+ sdio_set_drvdata(func, NULL);
+}
+
+static const struct sdio_device_id b43_sdio_ids[] = {
+ { SDIO_DEVICE(0x02d0, 0x044b) }, /* Nintendo Wii WLAN daughter card */
+ { },
+};
+
+static struct sdio_driver b43_sdio_driver = {
+ .name = "b43-sdio",
+ .id_table = b43_sdio_ids,
+ .probe = b43_sdio_probe,
+ .remove = b43_sdio_remove,
+};
+
+int b43_sdio_init(void)
+{
+ return sdio_register_driver(&b43_sdio_driver);
+}
+
+void b43_sdio_exit(void)
+{
+ sdio_unregister_driver(&b43_sdio_driver);
+}
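
b43_sdio_probe() above scans the card's CIS tuples for the vendor-specific record whose payload starts with HNBU_CHIPID and carries the little-endian vendor and device IDs in the following four bytes. A standalone sketch of that payload decoding (userspace C, not part of the patch), assuming only the five-byte layout the probe checks for:

	#include <stdint.h>
	#include <stdio.h>

	#define HNBU_CHIPID	0x01

	/* Decode an HNBU_CHIPID payload: data[0] is the sub-tuple code,
	 * data[1..2] the little-endian vendor ID, data[3..4] the device ID.
	 * Returns 0 on success, -1 if this is not a 5-byte chip-ID record. */
	static int decode_hnbu_chipid(const uint8_t *data, unsigned int size,
				      uint16_t *vendor, uint16_t *device)
	{
		if (size != 5 || data[0] != HNBU_CHIPID)
			return -1;
		*vendor = data[1] | (data[2] << 8);
		*device = data[3] | (data[4] << 8);
		return 0;
	}

	int main(void)
	{
		const uint8_t payload[] = { HNBU_CHIPID, 0xE4, 0x14, 0x18, 0x43 };
		uint16_t vendor, device;

		if (!decode_hnbu_chipid(payload, sizeof(payload), &vendor, &device))
			printf("Chip ID %04x:%04x\n", vendor, device);	/* 14e4:4318 */
		return 0;
	}
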
diff --git a/drivers/net/wireless/b43/sdio.h b/drivers/net/wireless/b43/sdio.h
new file mode 100644
index 00000000000..fb633094403
--- /dev/null
+++ b/drivers/net/wireless/b43/sdio.h
@@ -0,0 +1,45 @@
+#ifndef B43_SDIO_H_
+#define B43_SDIO_H_
+
+#include <linux/ssb/ssb.h>
+
+struct b43_wldev;
+
+
+#ifdef CONFIG_B43_SDIO
+
+struct b43_sdio {
+ struct ssb_bus ssb;
+ void *irq_handler_opaque;
+ void (*irq_handler)(struct b43_wldev *dev);
+};
+
+int b43_sdio_request_irq(struct b43_wldev *dev,
+ void (*handler)(struct b43_wldev *dev));
+void b43_sdio_free_irq(struct b43_wldev *dev);
+
+int b43_sdio_init(void);
+void b43_sdio_exit(void);
+
+
+#else /* CONFIG_B43_SDIO */
+
+
+static inline int b43_sdio_request_irq(struct b43_wldev *dev,
+			void (*handler)(struct b43_wldev *dev))
+{
+	return -ENODEV;
+}
+static inline void b43_sdio_free_irq(struct b43_wldev *dev)

+{
+}
+static inline int b43_sdio_init(void)
+{
+ return 0;
+}
+static inline void b43_sdio_exit(void)
+{
+}
+
+#endif /* CONFIG_B43_SDIO */
+#endif /* B43_SDIO_H_ */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 14f541248b5..f4e9695ec18 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
*/
-#include "xmit.h"
+#include "b43.h"
#include "phy_common.h"
#include "dma.h"
#include "pio.h"
@@ -690,8 +690,14 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
- ieee80211_rx_irqsafe(dev->wl->hw, skb);
+ local_bh_disable();
+ ieee80211_rx(dev->wl->hw, skb);
+ local_bh_enable();
+
+#if B43_DEBUG
+ dev->rx_count++;
+#endif
return;
drop:
b43dbg(dev->wl, "RX: Packet dropped\n");
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1d9223b3d4c..4b60148a5e6 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -37,6 +37,7 @@
#include <linux/firmware.h>
#include <linux/wireless.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <net/dst.h>
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 11319ec2d64..aaf227203a9 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/sched.h>
#include <linux/types.h>
#include "b43legacy.h"
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 6fa14a4e4b5..4dfb40a84c9 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -1,6 +1,7 @@
/* Host AP driver Info Frame processing (part of hostap.o module) */
#include <linux/if_arp.h>
+#include <linux/sched.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 3f2bda881a4..9419cebca8a 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1,6 +1,7 @@
/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
#include <linux/types.h>
+#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <net/lib80211.h>
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d58e6ed4e7..827824d45de 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -30,6 +30,7 @@
******************************************************************************/
+#include <linux/sched.h>
#include "ipw2200.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index a95caa01414..2716b91ba9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -99,6 +99,8 @@ static struct iwl_lib_ops iwl1000_lib = {
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.load_ucode = iwl5000_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index a16bd4147ea..cbb0585083a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -702,7 +702,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
u8 sta_id = iwl_find_station(priv, hdr->addr1);
if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n",
+ IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
hdr->addr1);
sta_id = iwl_add_station(priv, hdr->addr1, false,
CMD_ASYNC, NULL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index e9a685d8e3a..f059b49dc69 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -610,7 +611,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
if (rx_status.band == IEEE80211_BAND_5GHZ)
rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
- rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags &
+ rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
/* set the preamble flag if appropriate */
@@ -2839,6 +2840,8 @@ static struct iwl_lib_ops iwl3945_lib = {
.txq_free_tfd = iwl3945_hw_txq_free_tfd,
.txq_init = iwl3945_hw_tx_queue_init,
.load_ucode = iwl3945_load_bsm,
+ .dump_nic_event_log = iwl3945_dump_nic_event_log,
+ .dump_nic_error_log = iwl3945_dump_nic_error_log,
.apm_ops = {
.init = iwl3945_apm_init,
.reset = iwl3945_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index f2403690991..21679bf3a1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -209,6 +209,8 @@ extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
struct iwl_host_cmd *cmd);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,int left);
+extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv);
+extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/*
* Currently used by iwl-3945-rs... look at restructuring so that it doesn't
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index ca61d3796ce..6f703a04184 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -2021,6 +2022,12 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
agg->frame_count, txq_id, idx);
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
sc = le16_to_cpu(hdr->seq_ctrl);
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
@@ -2292,6 +2299,8 @@ static struct iwl_lib_ops iwl4965_lib = {
.alive_notify = iwl4965_alive_notify,
.init_alive_start = iwl4965_init_alive_start,
.load_ucode = iwl4965_load_bsm,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
.apm_ops = {
.init = iwl4965_apm_init,
.reset = iwl4965_apm_reset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 1d539e3b8db..6e6f516ba40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -317,7 +318,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
(s32)average_noise[i])) / 1500;
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
- min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+ min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
/* set negative sign */
@@ -1163,6 +1164,12 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
agg->frame_count, txq_id, idx);
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
+ if (!hdr) {
+ IWL_ERR(priv,
+ "BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
sc = le16_to_cpu(hdr->seq_ctrl);
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
@@ -1529,6 +1536,8 @@ struct iwl_lib_ops iwl5000_lib = {
.rx_handler_setup = iwl5000_rx_handler_setup,
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1579,6 +1588,8 @@ static struct iwl_lib_ops iwl5150_lib = {
.rx_handler_setup = iwl5000_rx_handler_setup,
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 82b9c93dff5..c295b8ee922 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -100,6 +100,8 @@ static struct iwl_lib_ops iwl6000_lib = {
.setup_deferred_work = iwl5000_setup_deferred_work,
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.load_ucode = iwl5000_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 00457bff1ed..eaafae091f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -1526,6 +1527,191 @@ static int iwl_read_ucode(struct iwl_priv *priv)
return ret;
}
+#ifdef CONFIG_IWLWIFI_DEBUG
+static const char *desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STABLE",
+ "FH_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+	"NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+ "UNKNOWN"
+};
+
+static const char *desc_lookup(int i)
+{
+ int max = ARRAY_SIZE(desc_lookup_text) - 1;
+
+ if (i < 0 || i > max)
+ i = max;
+
+ return desc_lookup_text[i];
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void iwl_dump_nic_error_log(struct iwl_priv *priv)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+
+ if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
+ return;
+ }
+
+ count = iwl_read_targ_mem(priv, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IWL_ERR(priv, "Start IWL Error Log Dump:\n");
+ IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
+ priv->status, count);
+ }
+
+ desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
+ blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
+ blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
+ ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
+ ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
+ data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
+ data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
+ line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
+ time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
+
+ IWL_ERR(priv, "Desc Time "
+ "data1 data2 line\n");
+ IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
+ desc_lookup(desc), desc, time, data1, data2, line);
+ IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
+ IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
+ ilink1, ilink2);
+
+}
+
+#define EVENT_START_OFFSET (4 * sizeof(u32))
+
+/**
+ * iwl_print_event_log - Dump error event log to syslog
+ *
+ */
+static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode)
+{
+ u32 i;
+ u32 base; /* SRAM byte address of event log header */
+ u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+
+ if (num_events == 0)
+ return;
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+
+ if (mode == 0)
+ event_size = 2 * sizeof(u32);
+ else
+ event_size = 3 * sizeof(u32);
+
+ ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+ /* "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing. */
+ for (i = 0; i < num_events; i++) {
+ ev = iwl_read_targ_mem(priv, ptr);
+ ptr += sizeof(u32);
+ time = iwl_read_targ_mem(priv, ptr);
+ ptr += sizeof(u32);
+ if (mode == 0) {
+ /* data, ev */
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
+ } else {
+ data = iwl_read_targ_mem(priv, ptr);
+ ptr += sizeof(u32);
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ }
+ }
+}
+
+void iwl_dump_nic_event_log(struct iwl_priv *priv)
+{
+ u32 base; /* SRAM byte address of event log header */
+ u32 capacity; /* event log capacity in # entries */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+ u32 size; /* # entries that we'll print */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+
+ if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
+ return;
+ }
+
+ /* event log header */
+ capacity = iwl_read_targ_mem(priv, base);
+ mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+
+ size = num_wraps ? capacity : next_entry;
+
+ /* bail out if nothing in log */
+ if (size == 0) {
+ IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
+ return;
+ }
+
+ IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
+ size, num_wraps);
+
+ /* if uCode has wrapped back to top of log, start at the oldest entry,
+ * i.e the next one that uCode would fill. */
+ if (num_wraps)
+ iwl_print_event_log(priv, next_entry,
+ capacity - next_entry, mode);
+ /* (then/else) start at top of log */
+ iwl_print_event_log(priv, 0, next_entry, mode);
+
+}
+#endif
+
/**
* iwl_alive_start - called after REPLY_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
@@ -2920,8 +3106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_pci_disable_device:
pci_disable_device(pdev);
out_ieee80211_free_hw:
- ieee80211_free_hw(priv->hw);
iwl_free_traffic_mem(priv);
+ ieee80211_free_hw(priv->hw);
out:
return err;
}
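
The iwl_dump_nic_event_log() code moved above prints the circular event log oldest-first: once the uCode has wrapped, the entries from next_entry up to capacity are the oldest, followed by indices 0 through next_entry - 1. A standalone sketch of that ordering, using only the header fields the function reads (capacity, num_wraps, next_entry):

	#include <stdint.h>
	#include <stdio.h>

	/* Print circular-log indices in oldest-to-newest order, mirroring the
	 * wraparound handling in iwl_dump_nic_event_log(). */
	static void dump_order(uint32_t capacity, uint32_t num_wraps,
			       uint32_t next_entry)
	{
		uint32_t size = num_wraps ? capacity : next_entry;
		uint32_t i;

		if (size == 0) {
			printf("nothing in log\n");
			return;
		}
		if (num_wraps)
			for (i = next_entry; i < capacity; i++)
				printf("entry %u\n", i);
		for (i = 0; i < next_entry; i++)
			printf("entry %u\n", i);
	}

	int main(void)
	{
		dump_order(8, 1, 3);	/* prints entries 3..7, then 0..2 */
		return 0;
	}
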
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 2c5c88fc38f..4afaf773aea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1154,7 +1154,7 @@ struct iwl_wep_cmd {
#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
-#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0
#define RX_RES_PHY_FLAGS_ANTENNA_POS 4
#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index fd26c0dc9c5..2dc92875545 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
@@ -1309,189 +1310,6 @@ static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
-
-static const char *desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT"
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
- "UNKNOWN"
-};
-
-static const char *desc_lookup(int i)
-{
- int max = ARRAY_SIZE(desc_lookup_text) - 1;
-
- if (i < 0 || i > max)
- i = max;
-
- return desc_lookup_text[i];
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-static void iwl_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 data2, line;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
-
- if (priv->ucode_type == UCODE_INIT)
- base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- else
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
- return;
- }
-
- count = iwl_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
- blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
- blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
- ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
- ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
- data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
- data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
- line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
- time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
-
- IWL_ERR(priv, "Desc Time "
- "data1 data2 line\n");
- IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
- desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
- ilink1, ilink2);
-
-}
-
-#define EVENT_START_OFFSET (4 * sizeof(u32))
-
-/**
- * iwl_print_event_log - Dump error event log to syslog
- *
- */
-static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
-{
- u32 i;
- u32 base; /* SRAM byte address of event log header */
- u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
- u32 ptr; /* SRAM byte address of log data */
- u32 ev, time, data; /* event log data */
-
- if (num_events == 0)
- return;
- if (priv->ucode_type == UCODE_INIT)
- base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
- else
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-
- if (mode == 0)
- event_size = 2 * sizeof(u32);
- else
- event_size = 3 * sizeof(u32);
-
- ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
-
- /* "time" is actually "data" for mode 0 (no timestamp).
- * place event id # at far right for easier visual parsing. */
- for (i = 0; i < num_events; i++) {
- ev = iwl_read_targ_mem(priv, ptr);
- ptr += sizeof(u32);
- time = iwl_read_targ_mem(priv, ptr);
- ptr += sizeof(u32);
- if (mode == 0) {
- /* data, ev */
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
- } else {
- data = iwl_read_targ_mem(priv, ptr);
- ptr += sizeof(u32);
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
- time, data, ev);
- }
- }
-}
-
-void iwl_dump_nic_event_log(struct iwl_priv *priv)
-{
- u32 base; /* SRAM byte address of event log header */
- u32 capacity; /* event log capacity in # entries */
- u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
- u32 num_wraps; /* # times uCode wrapped to top of log */
- u32 next_entry; /* index of next entry to be written by uCode */
- u32 size; /* # entries that we'll print */
-
- if (priv->ucode_type == UCODE_INIT)
- base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
- else
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
- }
-
- /* event log header */
- capacity = iwl_read_targ_mem(priv, base);
- mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
- num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
- next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
-
- size = num_wraps ? capacity : next_entry;
-
- /* bail out if nothing in log */
- if (size == 0) {
- IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
- }
-
- IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n",
- size, num_wraps);
-
- /* if uCode has wrapped back to top of log, start at the oldest entry,
- * i.e the next one that uCode would fill. */
- if (num_wraps)
- iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
- /* (then/else) start at top of log */
- iwl_print_event_log(priv, 0, next_entry, mode);
-
-}
#endif
/**
* iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1506,8 +1324,8 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) {
- iwl_dump_nic_error_log(priv);
- iwl_dump_nic_event_log(priv);
+ priv->cfg->ops->lib->dump_nic_error_log(priv);
+ priv->cfg->ops->lib->dump_nic_event_log(priv);
iwl_print_rx_config_cmd(priv);
}
#endif
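
The iwl_irq_handle_error() hunk above stops calling the core dump helpers directly and dispatches through the per-device iwl_lib_ops hooks added in iwl-core.h, so 3945 and AGN hardware can plug in different dumpers. A minimal standalone sketch of that indirection (hypothetical names, not the driver's own code):

	#include <stdio.h>

	/* Hook table mirroring the dump_nic_* members added to iwl_lib_ops. */
	struct lib_ops {
		void (*dump_nic_event_log)(void *priv);
		void (*dump_nic_error_log)(void *priv);
	};

	static void dump_event_log_agn(void *priv)
	{
		(void)priv;
		printf("AGN event log dump\n");
	}

	static void dump_error_log_agn(void *priv)
	{
		(void)priv;
		printf("AGN error log dump\n");
	}

	static const struct lib_ops agn_lib = {
		.dump_nic_event_log = dump_event_log_agn,
		.dump_nic_error_log = dump_error_log_agn,
	};

	/* The generic error path calls through the table, as
	 * iwl_irq_handle_error() now does via priv->cfg->ops->lib. */
	static void handle_error(const struct lib_ops *lib, void *priv)
	{
		lib->dump_nic_error_log(priv);
		lib->dump_nic_event_log(priv);
	}

	int main(void)
	{
		handle_error(&agn_lib, NULL);
		return 0;
	}
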
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7ff9ffb2b70..e50103a956b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -166,6 +166,8 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
+ void (*dump_nic_event_log)(struct iwl_priv *priv);
+ void (*dump_nic_error_log)(struct iwl_priv *priv);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -540,7 +542,19 @@ int iwl_pci_resume(struct pci_dev *pdev);
/*****************************************************
* Error Handling Debugging
******************************************************/
+#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_dump_nic_event_log(struct iwl_priv *priv);
+void iwl_dump_nic_error_log(struct iwl_priv *priv);
+#else
+static inline void iwl_dump_nic_event_log(struct iwl_priv *priv)
+{
+}
+
+static inline void iwl_dump_nic_error_log(struct iwl_priv *priv)
+{
+}
+#endif
+
void iwl_clear_isr_stats(struct iwl_priv *priv);
/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index fb844859a44..a198bcf6102 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -410,7 +410,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
buf_size - pos, 0);
- pos += strlen(buf);
+ pos += strlen(buf + pos);
if (buf_size - pos > 0)
buf[pos++] = '\n';
}
@@ -436,7 +436,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
if (sscanf(buf, "%d", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
- iwl_dump_nic_event_log(priv);
+ priv->cfg->ops->lib->dump_nic_event_log(priv);
return count;
}
@@ -909,7 +909,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
"0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
buf + pos, bufsz - pos, 0);
- pos += strlen(buf);
+ pos += strlen(buf + pos);
if (bufsz - pos > 0)
buf[pos++] = '\n';
}
@@ -932,7 +932,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
"0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
buf + pos, bufsz - pos, 0);
- pos += strlen(buf);
+ pos += strlen(buf + pos);
if (bufsz - pos > 0)
buf[pos++] = '\n';
}
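
The three strlen() fixes above matter because the debugfs readers append at buf + pos: strlen(buf) re-counts everything already in the buffer, so pos grows too fast and the next write lands past the real end of the text. A standalone sketch of the difference, with a fixed string standing in for hex_dump_to_buffer():

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[64];
		size_t pos = 0;

		pos += snprintf(buf + pos, sizeof(buf) - pos, "0x0000 ");
		/* Stand-in for hex_dump_to_buffer() writing at buf + pos. */
		strcpy(buf + pos, "de ad be ef");

		printf("strlen(buf)       would advance pos by %zu\n", strlen(buf));
		printf("strlen(buf + pos) advances pos by %zu\n", strlen(buf + pos));
		return 0;
	}
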
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 3d2b93a61e6..e14c9952a93 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -410,7 +410,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
u16 *validblockaddr)
{
u16 next_link_addr = 0, link_value = 0, valid_addr;
- int ret = 0;
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
@@ -430,29 +429,29 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* check for more block on the link list
*/
valid_addr = next_link_addr;
- next_link_addr = link_value;
+ next_link_addr = link_value * sizeof(u16);
IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
return -EINVAL;
if (!link_value) {
/*
- * reach the end of link list,
+ * reach the end of link list, return success and
* set address point to the starting address
* of the image
*/
- goto done;
+ *validblockaddr = valid_addr;
+ /* skip first 2 bytes (link list pointer) */
+ *validblockaddr += 2;
+ return 0;
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks < priv->cfg->max_ll_items);
- /* OTP full, use last block */
- IWL_DEBUG_INFO(priv, "OTP is full, use last block\n");
-done:
- *validblockaddr = valid_addr;
- /* skip first 2 bytes (link list pointer) */
- *validblockaddr += 2;
- return ret;
+ } while (usedblocks <= priv->cfg->max_ll_items);
+
+ /* OTP has no valid blocks */
+ IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n");
+ return -EINVAL;
}
/**
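
The hunk above makes two changes to the OTP link-list walk: the link word is a word index that must be scaled by sizeof(u16) before it can be used as a byte address, and a chain that never terminates within max_ll_items blocks now fails instead of falling back to the last block. A stand-alone sketch of that kind of traversal, with hypothetical data and the simplifying assumption that the last block in the chain is the valid image (not the driver's code):

#include <stdint.h>
#include <stdio.h>

#define MAX_LL_ITEMS 4	/* illustrative bound, stands in for priv->cfg->max_ll_items */

/* Returns 0 and the byte address of the valid image (just past the 2-byte
 * link word), or -1 if the chain runs off the end or never terminates. */
static int find_valid_block(const uint16_t *otp, size_t words, size_t *data_byte_addr)
{
	size_t block_byte = 0;	/* byte address of the block being examined */
	int used = 0;

	do {
		uint16_t link = otp[block_byte / 2];	/* word index of the next block */

		if (link == 0) {
			*data_byte_addr = block_byte + 2;	/* skip the link word */
			return 0;
		}
		block_byte = link * sizeof(uint16_t);	/* word index -> byte address */
		if (block_byte / 2 >= words)
			return -1;
		used++;
	} while (used <= MAX_LL_ITEMS);

	return -1;	/* no terminating block found */
}

int main(void)
{
	/* block at word 0 links to word 4; the block at word 4 ends the chain */
	const uint16_t otp[8] = { 4, 0x1111, 0x2222, 0x3333, 0, 0xaaaa, 0xbbbb, 0xcccc };
	size_t addr;

	if (find_valid_block(otp, 8, &addr) == 0)
		printf("valid image starts at byte 0x%zx\n", addr);
	return 0;
}
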
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 6b68db7b1b8..80b9e45d9b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -220,35 +220,35 @@ struct iwl_eeprom_enhanced_txpwr {
* Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_)
*/
/* 2.4 GHz band: CCK */
-#define EEPROM_LB_CCK_20_COMMON ((0xAA)\
+#define EEPROM_LB_CCK_20_COMMON ((0xA8)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */
/* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
-#define EEPROM_LB_OFDM_COMMON ((0xB2)\
+#define EEPROM_LB_OFDM_COMMON ((0xB0)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
/* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
-#define EEPROM_HB_OFDM_COMMON ((0xCA)\
+#define EEPROM_HB_OFDM_COMMON ((0xC8)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
/* 2.4GHz band channels:
* 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */
-#define EEPROM_LB_OFDM_20_BAND ((0xE2)\
+#define EEPROM_LB_OFDM_20_BAND ((0xE0)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */
/* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */
-#define EEPROM_LB_OFDM_HT40_BAND ((0x122)\
+#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */
/* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */
-#define EEPROM_HB_OFDM_20_BAND ((0x14A)\
+#define EEPROM_HB_OFDM_20_BAND ((0x148)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */
/* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */
-#define EEPROM_HB_OFDM_HT40_BAND ((0x17A)\
+#define EEPROM_HB_OFDM_HT40_BAND ((0x178)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
/* 2.4 GHz band, channel 13: Legacy, HT */
-#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x192)\
+#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
/* 5.2 GHz band, channel 140: Legacy, HT */
-#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A2)\
+#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
/* 5.2 GHz band, HT40 channels (132,+1) (44,+1) */
-#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B2)\
+#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\
| INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 532c8d6cd8d..a6856daf14c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-dev.h" /* FIXME: remove */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index b90adcb73b0..493626bcd3e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -250,12 +250,20 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
}
spin_unlock_irqrestore(&rxq->lock, flags);
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ priority |= __GFP_NOWARN;
/* Alloc a new receive buffer */
skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
priority);
if (!skb) {
- IWL_CRIT(priv, "Can not allocate SKB buffers\n");
+ if (net_ratelimit())
+ IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
@@ -1036,7 +1044,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
* as a bitmask.
*/
rx_status.antenna =
- le16_to_cpu(phy_res->phy_flags & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
/* set the preamble flag if appropriate */
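
The rx_status.antenna hunk is an endianness fix: phy_flags arrives little-endian, so it must be converted with le16_to_cpu() before a host-order mask is applied; masking first only happens to work on little-endian hosts. A small sketch of the convert-then-mask pattern with an explicit little-endian load and illustrative mask/shift values (the driver's actual RX_RES_PHY_FLAGS_ANTENNA_* constants may differ):

#include <stdint.h>
#include <stdio.h>

#define ANTENNA_MSK 0x00f0	/* illustrative host-order bit field */
#define ANTENNA_POS 4

/* Load a 16-bit little-endian value regardless of host byte order. */
static uint16_t le16_to_host(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

int main(void)
{
	const uint8_t phy_flags_le[2] = { 0x50, 0x00 };	/* antenna field = 5 */
	unsigned int antenna =
		(le16_to_host(phy_flags_le) & ANTENNA_MSK) >> ANTENNA_POS;

	printf("antenna bitmask = %u\n", antenna);	/* prints 5 on any host */
	return 0;
}
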
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index a2b9ec82b96..c6633fec821 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -520,7 +520,7 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
struct iwl_host_cmd cmd = {
.id = REPLY_WEPKEY,
.data = wep_cmd,
- .flags = CMD_SYNC,
+ .flags = CMD_ASYNC,
};
memset(wep_cmd, 0, cmd_size +
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a7422e52d88..fb9bcfa6d94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -28,6 +28,7 @@
*****************************************************************************/
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
@@ -197,6 +198,12 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
pci_free_consistent(dev, priv->hw_params.tfd_size *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 090966837f3..d00a8033409 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
@@ -1146,11 +1147,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
}
spin_unlock_irqrestore(&rxq->lock, flags);
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ priority |= __GFP_NOWARN;
/* Alloc a new receive buffer */
skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
if (!skb) {
if (net_ratelimit())
- IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
+ IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+ net_ratelimit())
+ IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
/* We don't reschedule replenish work here -- we will
* call the restock method and if it still needs
* more buffers it will schedule replenish */
@@ -1474,6 +1482,7 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
tasklet_kill(&priv->irq_tasklet);
}
+#ifdef CONFIG_IWLWIFI_DEBUG
static const char *desc_lookup(int i)
{
switch (i) {
@@ -1497,7 +1506,7 @@ static const char *desc_lookup(int i)
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-static void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
+void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
{
u32 i;
u32 desc, time, count, base, data1;
@@ -1591,7 +1600,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
}
}
-static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
+void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1633,6 +1642,16 @@ static void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
iwl3945_print_event_log(priv, 0, next_entry, mode);
}
+#else
+void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
+{
+}
+
+void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
+{
+}
+
+#endif
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
{
@@ -3676,21 +3695,6 @@ static ssize_t dump_error_log(struct device *d,
static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
-static ssize_t dump_event_log(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
-
- if (p[0] == '1')
- iwl3945_dump_nic_event_log(priv);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
-
/*****************************************************************************
*
* driver setup and tear down
@@ -3735,7 +3739,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_antenna.attr,
&dev_attr_channels.attr,
&dev_attr_dump_errors.attr,
- &dev_attr_dump_events.attr,
&dev_attr_flags.attr,
&dev_attr_filter_flags.attr,
#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
@@ -4094,8 +4097,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
out_ieee80211_free_hw:
- ieee80211_free_hw(priv->hw);
iwl_free_traffic_mem(priv);
+ ieee80211_free_hw(priv->hw);
out:
return err;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a56a2b0ac99..f3c55658225 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 23b52fa2605..84158b6d35d 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -40,6 +40,7 @@
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
+#include <linux/sched.h>
#include "iwm.h"
#include "bus.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index d668e475632..222eb2cf1b3 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/sched.h>
#include <linux/ieee80211.h>
#include <linux/wireless.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 40dbcbc1659..771a301003c 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 685098148e1..0a324dcd264 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -6,6 +6,7 @@
#include <net/iw_handler.h>
#include <net/lib80211.h>
#include <linux/kfifo.h>
+#include <linux/sched.h>
#include "host.h"
#include "hostcmd.h"
#include "decl.h"
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index c42d3faa266..23f684337fd 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -3,6 +3,7 @@
* responses as well as events generated by firmware.
*/
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 4c018f7a0a8..8c3766a6e8e 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -3,6 +3,7 @@
*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include "hostcmd.h"
#include "radiotap.h"
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 896f532182f..38cfd79e059 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -631,6 +631,9 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
data->beacon_int = 1024 * info->beacon_int / 1000 * HZ / 1000;
if (WARN_ON(!data->beacon_int))
data->beacon_int = 1;
+ if (data->started)
+ mod_timer(&data->beacon_timer,
+ jiffies + data->beacon_int);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 4c97c6ad6f5..bc08464d832 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -19,6 +19,7 @@
*
*/
+#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index e26d7b3ceab..2505be56ae3 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -23,6 +23,7 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
+#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index f7c677e2094..69d2f882fd0 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -20,6 +20,7 @@
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/sched.h>
#include <asm/io.h>
#include <asm/system.h>
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7b3ee8c2eae..68bc9bb1dbf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
+#include <linux/sched.h>
#include <linux/uaccess.h>
#include "rt2x00.h"
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 5462cb5ad99..567f029a8cd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -380,7 +380,7 @@ static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb,
{
}
-static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, bool l2pad,
+static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
unsigned int header_length,
struct rxdone_entry_desc *rxdesc)
{
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 1cbd9b4a3ef..b8f5ee33445 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2381,6 +2381,7 @@ static struct usb_device_id rt73usb_device_table[] = {
/* Huawei-3Com */
{ USB_DEVICE(0x1472, 0x0009), USB_DEVICE_DATA(&rt73usb_ops) },
/* Hercules */
+ { USB_DEVICE(0x06f8, 0xe002), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x06f8, 0xe010), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x06f8, 0xe020), USB_DEVICE_DATA(&rt73usb_ops) },
/* Linksys */
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 7b14d5bc63d..88060e11754 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,5 +1,5 @@
menuconfig WL12XX
- boolean "TI wl12xx driver support"
+ tristate "TI wl12xx driver support"
depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
---help---
This will enable TI wl12xx driver support. The drivers make
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 38688847d56..23a6a6d4863 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1070,7 +1070,7 @@ static int eject_installer(struct usb_interface *intf)
/* Find bulk out endpoint */
endpoint = &iface_desc->endpoint[1].desc;
- if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
+ if (usb_endpoint_dir_out(endpoint) &&
usb_endpoint_xfer_bulk(endpoint)) {
bulk_out_ep = endpoint->bEndpointAddress;
} else {
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index dc22782633a..83a044dbd1d 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -134,18 +134,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
}
/* Enable the Rx interrupts for the first buffer */
- reg_data = in_be32(drvdata->base_addr + XEL_RSR_OFFSET);
out_be32(drvdata->base_addr + XEL_RSR_OFFSET,
- reg_data | XEL_RSR_RECV_IE_MASK);
+ XEL_RSR_RECV_IE_MASK);
/* Enable the Rx interrupts for the second Buffer if
* configured in HW */
if (drvdata->rx_ping_pong != 0) {
- reg_data = in_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_RSR_OFFSET);
out_be32(drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_RSR_OFFSET,
- reg_data | XEL_RSR_RECV_IE_MASK);
+ XEL_RSR_RECV_IE_MASK);
}
/* Enable the Global Interrupt Enable */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index a0384b6f09b..b4234733375 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -169,7 +169,6 @@ static void znet_tx_timeout (struct net_device *dev);
static int znet_request_resources (struct net_device *dev)
{
struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
goto failed;
@@ -187,13 +186,9 @@ static int znet_request_resources (struct net_device *dev)
free_sia:
release_region (znet->sia_base, znet->sia_size);
free_tx_dma:
- flags = claim_dma_lock();
free_dma (znet->tx_dma);
- release_dma_lock (flags);
free_rx_dma:
- flags = claim_dma_lock();
free_dma (znet->rx_dma);
- release_dma_lock (flags);
free_irq:
free_irq (dev->irq, dev);
failed:
@@ -203,14 +198,11 @@ static int znet_request_resources (struct net_device *dev)
static void znet_release_resources (struct net_device *dev)
{
struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
release_region (znet->sia_base, znet->sia_size);
release_region (dev->base_addr, znet->io_size);
- flags = claim_dma_lock();
free_dma (znet->tx_dma);
free_dma (znet->rx_dma);
- release_dma_lock (flags);
free_irq (dev->irq, dev);
}