Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 49
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 40
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 128
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 88
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 55
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 48
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 55
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 136
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 81
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 58
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 560
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 75
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 56
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 50
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 521
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 48
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 57
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 58
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 48
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 53
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 48
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 211
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 298
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 271
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_alloc.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 1137
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 472
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h | 107
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 316
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 818
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1152
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 24
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 12
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 3501
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 188
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 68
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 625
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 182
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1109
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 107
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 203
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 1194
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 22
-rw-r--r--  drivers/net/ethernet/intel/i40evf/Makefile | 36
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 978
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 110
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2197
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 58
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 624
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 236
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 169
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 75
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 94
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_register.h | 4674
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_status.h | 100
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 1667
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 287
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 1165
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 363
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 298
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 704
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2475
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 781
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 226
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 96
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 205
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 109
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 223
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 89
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 70
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 50
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 48
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 48
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 49
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 50
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 151
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 50
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 91
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 173
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 377
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 156
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 785
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 124
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 22
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 54
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb.h | 23
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 321
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 445
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 282
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 78
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 343
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1231
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 124
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 75
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 235
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 129
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 85
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 111
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 441
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 265
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 1633
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/regs.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 33
143 files changed, 33039 insertions, 8339 deletions
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 149ac85b5f9..bb9f0ba9d16 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -220,12 +220,12 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
- tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
depends on PCI_MSI
---help---
- This driver supports Intel(R) 82599 virtual functions. For more
- information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
+ This driver supports Intel(R) PCI Express virtual functions for the
+ Intel(R) ixgbe driver. For more information on how to identify your
+ adapter, go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
@@ -243,6 +243,7 @@ config IXGBEVF
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
+ select PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -259,4 +260,44 @@ config I40E
To compile this driver as a module, choose M here. The module
will be called i40e.
+config I40E_VXLAN
+ bool "Virtual eXtensible Local Area Network Support"
+ default n
+ depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
+ ---help---
+ This allows one to create VXLAN virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. VXLAN is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to use Virtual eXtensible Local Area Network
+ (VXLAN) in the driver.
+
+config I40E_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default n
+ depends on I40E && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+
+ If unsure, say N.
+
+config I40EVF
+ tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) XL710 and X710 virtual functions.
+ For more information on how to identify your adapter, go to the
+ Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40evf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 5bae933efc7..cdbbca8a375 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_I40EVF) += i40evf/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada6e210279..9d979d7debe 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
* testing, ie sending frames with bad CRC.
*/
if (unlikely(skb->no_fcs))
- cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+ cb->command |= cpu_to_le16(cb_tx_nc);
else
- cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+ cb->command &= ~cpu_to_le16(cb_tx_nc);
/* interrupt every 16 packets regardless of delay */
if ((nic->cbs_avail & ~15) == nic->cbs_avail)
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXALL;
netdev->netdev_ops = &e100_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+ netdev->ethtool_ops = &e100_ethtool_ops;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -2985,7 +2985,6 @@ err_out_free_res:
err_out_disable_pdev:
pci_disable_device(pdev);
err_out_free_dev:
- pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
}
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
}
}
@@ -3036,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 26d9cd59ec7..10a0f221b18 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -36,7 +36,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -83,6 +82,11 @@ struct e1000_adapter;
#define E1000_MAX_INTR 10
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ */
+#define E1000_CHECK_RESET_COUNT 50
+
/* TX/RX descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 256
@@ -312,8 +316,6 @@ struct e1000_adapter {
struct delayed_work watchdog_task;
struct delayed_work fifo_stall_task;
struct delayed_work phy_info_task;
-
- struct mutex mutex;
};
enum e1000_state_t {
@@ -325,7 +327,7 @@ enum e1000_state_t {
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
#define e_dbg(format, arg...) \
netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
#define e_err(msglvl, format, arg...) \
@@ -346,20 +348,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aeefb92..d50f78afb56 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
- } while (good_cnt < 64 && jiffies < (time + 20));
+ } while (good_cnt < 64 && time_after(time + 20, jiffies));
+
if (good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
break;
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 2879b9631e1..e9b07ccc0eb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -115,8 +115,6 @@ static DEFINE_SPINLOCK(e1000_phy_lock);
*/
static s32 e1000_set_phy_type(struct e1000_hw *hw)
{
- e_dbg("e1000_set_phy_type");
-
if (hw->mac_type == e1000_undefined)
return -E1000_ERR_PHY_TYPE;
@@ -159,8 +157,6 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
u32 ret_val;
u16 phy_saved_data;
- e_dbg("e1000_phy_init_script");
-
if (hw->phy_init_script) {
msleep(20);
@@ -253,8 +249,6 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
*/
s32 e1000_set_mac_type(struct e1000_hw *hw)
{
- e_dbg("e1000_set_mac_type");
-
switch (hw->device_id) {
case E1000_DEV_ID_82542:
switch (hw->revision_id) {
@@ -365,8 +359,6 @@ void e1000_set_media_type(struct e1000_hw *hw)
{
u32 status;
- e_dbg("e1000_set_media_type");
-
if (hw->mac_type != e1000_82543) {
/* tbi_compatibility is only valid on 82543 */
hw->tbi_compatibility_en = false;
@@ -415,8 +407,6 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
u32 led_ctrl;
s32 ret_val;
- e_dbg("e1000_reset_hw");
-
/* For 82542 (rev 2.0), disable MWI before issuing a device reset */
if (hw->mac_type == e1000_82542_rev2_0) {
e_dbg("Disabling MWI on 82542 rev 2.0\n");
@@ -566,8 +556,6 @@ s32 e1000_init_hw(struct e1000_hw *hw)
u32 mta_size;
u32 ctrl_ext;
- e_dbg("e1000_init_hw");
-
/* Initialize Identification LED */
ret_val = e1000_id_led_init(hw);
if (ret_val) {
@@ -683,8 +671,6 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
u16 eeprom_data;
s32 ret_val;
- e_dbg("e1000_adjust_serdes_amplitude");
-
if (hw->media_type != e1000_media_type_internal_serdes)
return E1000_SUCCESS;
@@ -730,8 +716,6 @@ s32 e1000_setup_link(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- e_dbg("e1000_setup_link");
-
/* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
@@ -848,8 +832,6 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
u32 signal = 0;
s32 ret_val;
- e_dbg("e1000_setup_fiber_serdes_link");
-
/* On adapters with a MAC newer than 82544, SWDP 1 will be
* set when the optics detect a signal. On older adapters, it will be
* cleared when there is a signal. This applies to fiber media only.
@@ -1051,8 +1033,6 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_preconfig");
-
ctrl = er32(CTRL);
/* With 82543, we need to force speed and duplex on the MAC equal to
* what the PHY speed and duplex configuration is. In addition, we need
@@ -1112,8 +1092,6 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_igp_setup");
-
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1254,8 +1232,6 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_mgp_setup");
-
if (hw->phy_reset_disable)
return E1000_SUCCESS;
@@ -1362,8 +1338,6 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_copper_link_autoneg");
-
/* Perform some bounds checking on the hw->autoneg_advertised
* parameter. If this variable is zero, then set it to the default.
*/
@@ -1432,7 +1406,6 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
{
s32 ret_val;
- e_dbg("e1000_copper_link_postconfig");
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
e1000_config_collision_dist(hw);
@@ -1473,8 +1446,6 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- e_dbg("e1000_setup_copper_link");
-
/* Check if it is a valid PHY and set PHY mode if necessary. */
ret_val = e1000_copper_link_preconfig(hw);
if (ret_val)
@@ -1554,8 +1525,6 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
- e_dbg("e1000_phy_setup_autoneg");
-
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
@@ -1707,8 +1676,6 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
u16 phy_data;
u16 i;
- e_dbg("e1000_phy_force_speed_duplex");
-
/* Turn off Flow control if we are forcing speed and duplex. */
hw->fc = E1000_FC_NONE;
@@ -1939,8 +1906,6 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
{
u32 tctl, coll_dist;
- e_dbg("e1000_config_collision_dist");
-
if (hw->mac_type < e1000_82543)
coll_dist = E1000_COLLISION_DISTANCE_82542;
else
@@ -1970,8 +1935,6 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_config_mac_to_phy");
-
/* 82544 or newer MAC, Auto Speed Detection takes care of
* MAC speed/duplex configuration.
*/
@@ -2049,8 +2012,6 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
{
u32 ctrl;
- e_dbg("e1000_force_mac_fc");
-
/* Get the current configuration of the Device Control Register */
ctrl = er32(CTRL);
@@ -2120,8 +2081,6 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
u16 speed;
u16 duplex;
- e_dbg("e1000_config_fc_after_link_up");
-
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
@@ -2337,8 +2296,6 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
u32 status;
s32 ret_val = E1000_SUCCESS;
- e_dbg("e1000_check_for_serdes_link_generic");
-
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
@@ -2449,8 +2406,6 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_check_for_link");
-
ctrl = er32(CTRL);
status = er32(STATUS);
@@ -2632,8 +2587,6 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_get_speed_and_duplex");
-
if (hw->mac_type >= e1000_82543) {
status = er32(STATUS);
if (status & E1000_STATUS_SPEED_1000) {
@@ -2699,7 +2652,6 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
u16 i;
u16 phy_data;
- e_dbg("e1000_wait_autoneg");
e_dbg("Waiting for Auto-Neg to complete.\n");
/* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -2866,8 +2818,6 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
u32 ret_val;
unsigned long flags;
- e_dbg("e1000_read_phy_reg");
-
spin_lock_irqsave(&e1000_phy_lock, flags);
if ((hw->phy_type == e1000_phy_igp) &&
@@ -2894,8 +2844,6 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
- e_dbg("e1000_read_phy_reg_ex");
-
if (reg_addr > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
@@ -3008,8 +2956,6 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
u32 ret_val;
unsigned long flags;
- e_dbg("e1000_write_phy_reg");
-
spin_lock_irqsave(&e1000_phy_lock, flags);
if ((hw->phy_type == e1000_phy_igp) &&
@@ -3036,8 +2982,6 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
u32 mdic = 0;
const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
- e_dbg("e1000_write_phy_reg_ex");
-
if (reg_addr > MAX_PHY_REG_ADDRESS) {
e_dbg("PHY Address %d is out of range\n", reg_addr);
return -E1000_ERR_PARAM;
@@ -3129,8 +3073,6 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
u32 ctrl, ctrl_ext;
u32 led_ctrl;
- e_dbg("e1000_phy_hw_reset");
-
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
@@ -3189,8 +3131,6 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_phy_reset");
-
switch (hw->phy_type) {
case e1000_phy_igp:
ret_val = e1000_phy_hw_reset(hw);
@@ -3229,8 +3169,6 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
u16 phy_id_high, phy_id_low;
bool match = false;
- e_dbg("e1000_detect_gig_phy");
-
if (hw->phy_id != 0)
return E1000_SUCCESS;
@@ -3301,7 +3239,6 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- e_dbg("e1000_phy_reset_dsp");
do {
ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3333,8 +3270,6 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
u16 phy_data, min_length, max_length, average;
e1000_rev_polarity polarity;
- e_dbg("e1000_phy_igp_get_info");
-
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter.
*/
@@ -3414,8 +3349,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
u16 phy_data;
e1000_rev_polarity polarity;
- e_dbg("e1000_phy_m88_get_info");
-
/* The downshift status is checked only once, after link is established,
* and it stored in the hw->speed_downgraded parameter.
*/
@@ -3487,8 +3420,6 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_phy_get_info");
-
phy_info->cable_length = e1000_cable_length_undefined;
phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
phy_info->cable_polarity = e1000_rev_polarity_undefined;
@@ -3527,8 +3458,6 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
{
- e_dbg("e1000_validate_mdi_settings");
-
if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
e_dbg("Invalid MDI setting detected\n");
hw->mdix = 1;
@@ -3551,8 +3480,6 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
u16 eeprom_size;
- e_dbg("e1000_init_eeprom_params");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -3770,8 +3697,6 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 eecd, i = 0;
- e_dbg("e1000_acquire_eeprom");
-
eecd = er32(EECD);
/* Request EEPROM Access */
@@ -3871,8 +3796,6 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
{
u32 eecd;
- e_dbg("e1000_release_eeprom");
-
eecd = er32(EECD);
if (hw->eeprom.type == e1000_eeprom_spi) {
@@ -3920,8 +3843,6 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
u16 retry_count = 0;
u8 spi_stat_reg;
- e_dbg("e1000_spi_eeprom_ready");
-
/* Read "Status Register" repeatedly until the LSB is cleared. The
* EEPROM will signal that the command has been completed by clearing
* bit 0 of the internal status register. If it's not cleared within
@@ -3974,8 +3895,6 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u32 i = 0;
- e_dbg("e1000_read_eeprom");
-
if (hw->mac_type == e1000_ce4100) {
GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
data);
@@ -4076,8 +3995,6 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- e_dbg("e1000_validate_eeprom_checksum");
-
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
e_dbg("EEPROM Read Error\n");
@@ -4112,8 +4029,6 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
u16 checksum = 0;
u16 i, eeprom_data;
- e_dbg("e1000_update_eeprom_checksum");
-
for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
e_dbg("EEPROM Read Error\n");
@@ -4154,8 +4069,6 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
s32 status = 0;
- e_dbg("e1000_write_eeprom");
-
if (hw->mac_type == e1000_ce4100) {
GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
data);
@@ -4205,8 +4118,6 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_eeprom_info *eeprom = &hw->eeprom;
u16 widx = 0;
- e_dbg("e1000_write_eeprom_spi");
-
while (widx < words) {
u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4274,8 +4185,6 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
u16 words_written = 0;
u16 i = 0;
- e_dbg("e1000_write_eeprom_microwire");
-
/* Send the write enable command to the EEPROM (3-bit opcode plus
* 6/8-bit dummy address beginning with 11). It's less work to include
* the 11 of the dummy address as part of the opcode than it is to shift
@@ -4354,8 +4263,6 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
u16 offset;
u16 eeprom_data, i;
- e_dbg("e1000_read_mac_addr");
-
for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
offset = i >> 1;
if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
@@ -4394,8 +4301,6 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
u32 i;
u32 rar_num;
- e_dbg("e1000_init_rx_addrs");
-
/* Setup the receive address. */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -4553,8 +4458,6 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
u16 eeprom_data, i, temp;
const u16 led_mask = 0x0F;
- e_dbg("e1000_id_led_init");
-
if (hw->mac_type < e1000_82540) {
/* Nothing to do */
return E1000_SUCCESS;
@@ -4626,8 +4529,6 @@ s32 e1000_setup_led(struct e1000_hw *hw)
u32 ledctl;
s32 ret_val = E1000_SUCCESS;
- e_dbg("e1000_setup_led");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4678,8 +4579,6 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
- e_dbg("e1000_cleanup_led");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4714,8 +4613,6 @@ s32 e1000_led_on(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- e_dbg("e1000_led_on");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4760,8 +4657,6 @@ s32 e1000_led_off(struct e1000_hw *hw)
{
u32 ctrl = er32(CTRL);
- e_dbg("e1000_led_off");
-
switch (hw->mac_type) {
case e1000_82542_rev2_0:
case e1000_82542_rev2_1:
@@ -4889,8 +4784,6 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
*/
void e1000_reset_adaptive(struct e1000_hw *hw)
{
- e_dbg("e1000_reset_adaptive");
-
if (hw->adaptive_ifs) {
if (!hw->ifs_params_forced) {
hw->current_ifs_val = 0;
@@ -4917,8 +4810,6 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
*/
void e1000_update_adaptive(struct e1000_hw *hw)
{
- e_dbg("e1000_update_adaptive");
-
if (hw->adaptive_ifs) {
if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
if (hw->tx_packet_delta > MIN_NUM_XMITS) {
@@ -4986,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
- if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ if (is_broadcast_ether_addr(mac_addr))
/* Broadcast packet */
stats->bprc++;
- else if (*mac_addr & 0x01)
+ else if (is_multicast_ether_addr(mac_addr))
/* Multicast packet */
stats->mprc++;
@@ -5114,8 +5005,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
u16 i, phy_data;
u16 cable_length;
- e_dbg("e1000_get_cable_length");
-
*min_length = *max_length = 0;
/* Use old method for Phy older than IGP */
@@ -5231,8 +5120,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_check_polarity");
-
if (hw->phy_type == e1000_phy_m88) {
/* return the Polarity bit in the Status register. */
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
@@ -5299,8 +5186,6 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_check_downshift");
-
if (hw->phy_type == e1000_phy_igp) {
ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
&phy_data);
@@ -5411,8 +5296,6 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
s32 ret_val;
u16 phy_data, phy_saved_data, speed, duplex, i;
- e_dbg("e1000_config_dsp_after_link_change");
-
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5546,8 +5429,6 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
s32 ret_val;
u16 eeprom_data;
- e_dbg("e1000_set_phy_mode");
-
if ((hw->mac_type == e1000_82545_rev_3) &&
(hw->media_type == e1000_media_type_copper)) {
ret_val =
@@ -5594,7 +5475,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
{
s32 ret_val;
u16 phy_data;
- e_dbg("e1000_set_d3_lplu_state");
if (hw->phy_type != e1000_phy_igp)
return E1000_SUCCESS;
@@ -5699,8 +5579,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
u16 default_page = 0;
u16 phy_data;
- e_dbg("e1000_set_vco_speed");
-
switch (hw->mac_type) {
case e1000_82545_rev_3:
case e1000_82546_rev_3:
@@ -5872,7 +5750,6 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
*/
static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
{
- e_dbg("e1000_get_auto_rd_done");
msleep(5);
return E1000_SUCCESS;
}
@@ -5887,7 +5764,6 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
*/
static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
{
- e_dbg("e1000_get_phy_cfg_done");
msleep(10);
return E1000_SUCCESS;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 59ad007dd5a..660971f304b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
set_bit(__E1000_DOWN, &adapter->flags);
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
-
cancel_delayed_work_sync(&adapter->watchdog_task);
+
+ /*
+ * Since the watchdog task can reschedule other tasks, we should cancel
+ * it first, otherwise we can run into the situation when a work is
+ * still running after the adapter has been turned down.
+ */
+
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
+
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-static void e1000_reinit_safe(struct e1000_adapter *adapter)
-{
- while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
- msleep(1);
- mutex_lock(&adapter->mutex);
- e1000_down(adapter);
- e1000_up(adapter);
- mutex_unlock(&adapter->mutex);
- clear_bit(__E1000_RESETTING, &adapter->flags);
-}
-
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
- /* if rtnl_lock is not held the call path is bogus */
- ASSERT_RTNL();
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -1018,19 +1012,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
- !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* according to DMA-API-HOWTO, coherent calls will always
- * succeed if the set call did
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
pr_err("No usable DMA config, aborting\n");
goto err_dma;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
netdev->netdev_ops = &e1000_netdev_ops;
@@ -1321,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
spin_lock_init(&adapter->stats_lock);
- mutex_init(&adapter->mutex);
set_bit(__E1000_DOWN, &adapter->flags);
@@ -1445,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
@@ -2330,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
phy_info_task.work);
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
- mutex_lock(&adapter->mutex);
+
e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
- mutex_unlock(&adapter->mutex);
}
/**
@@ -2350,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 tctl;
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
- mutex_lock(&adapter->mutex);
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((er32(TDT) == er32(TDH)) &&
(er32(TDFT) == er32(TDFH)) &&
@@ -2373,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
schedule_delayed_work(&adapter->fifo_stall_task, 1);
}
}
- mutex_unlock(&adapter->mutex);
}
bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2427,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
struct e1000_tx_ring *txdr = adapter->tx_ring;
u32 link, tctl;
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
-
- mutex_lock(&adapter->mutex);
link = e1000_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link)
goto link_up;
@@ -2521,7 +2502,7 @@ link_up:
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
- goto unlock;
+ return;
}
}
@@ -2549,9 +2530,6 @@ link_up:
/* Reschedule the task */
if (!test_bit(__E1000_DOWN, &adapter->flags))
schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
-
-unlock:
- mutex_unlock(&adapter->mutex);
}
enum latency_range {
@@ -2704,14 +2682,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ int err;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
@@ -3128,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
*/
tx_ring = adapter->tx_ring;
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
@@ -3500,10 +3472,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
- if (test_bit(__E1000_DOWN, &adapter->flags))
- return;
e_err(drv, "Reset adapter\n");
- e1000_reinit_safe(adapter);
+ e1000_reinit_locked(adapter);
}
/**
@@ -3917,8 +3887,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
" next_to_watch <%x>\n"
" jiffies <%lx>\n"
" next_to_watch.status <%x>\n",
- (unsigned long)((tx_ring - adapter->tx_ring) /
- sizeof(struct e1000_tx_ring)),
+ (unsigned long)(tx_ring - adapter->tx_ring),
readl(hw->hw_addr + tx_ring->tdh),
readl(hw->hw_addr + tx_ring->tdt),
tx_ring->next_to_use,
@@ -4969,6 +4938,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev)) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
+
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3..08f22f34880 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
@@ -718,8 +711,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (ret_val)
+ return ret_val;
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
@@ -1369,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
.setup_led = e1000e_setup_led_generic,
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 90d363b2d28..535a9430976 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_80003ES2LAN_H_
#define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 8fed74e3fa5..218481e509f 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
@@ -1903,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
.config_collision_dist = e1000e_config_collision_dist_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 08e24dc3dc0..2e758f796d6 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_82571_H_
#define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index c2dcfcc1085..106de493373 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351c94a0cf7..d18e8921257 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -35,9 +28,11 @@
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
-#define E1000_WUC_APME 0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
-#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ad0edd11015..7785240a0da 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -269,12 +262,13 @@ struct e1000_adapter {
u32 tx_head_addr;
u32 tx_fifo_size;
u32 tx_dma_failed;
+ u32 tx_hwtstamp_timeouts;
/* Rx */
- bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
- int work_to_do) ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
- gfp_t gfp);
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
@@ -333,7 +327,6 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct print_hang_task;
- bool idle_check;
int phy_hang_count;
u16 tx_ring_count;
@@ -342,6 +335,7 @@ struct e1000_adapter {
struct hwtstamp_config hwtstamp_config;
struct delayed_work systim_overflow_work;
struct sk_buff *tx_hwtstamp_skb;
+ unsigned long tx_hwtstamp_start;
struct work_struct tx_hwtstamp_work;
spinlock_t systim_lock; /* protects SYSTIML/H registers */
struct cyclecounter cc;
@@ -397,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
*/
#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
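The arithmetic behind these values: a 46-bit SYSTIM counter ticking in nanoseconds wraps after 2^46 ns ≈ 70,369 s ≈ 19.55 hours, so scheduling the overflow work every four hours (HZ * 60 * 60 * 4 jiffies) leaves a wide safety margin. The two new 82574 defines bound a sanity-check loop for SYSTIM reads on 82574/82583 parts; the loop itself lives in the cyclecounter read path in netdev.c, so the following is an illustrative reconstruction under that assumption, not the verbatim driver code:

static u64 e1000e_sanitize_systim_sketch(struct e1000_hw *hw, u64 systim)
{
	int i;

	/* Re-read SYSTIM until two consecutive reads agree to within
	 * E1000_82574_SYSTIM_EPSILON (2^35 ns), giving up after
	 * E1000_MAX_82574_SYSTIM_REREADS attempts.
	 */
	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
		u64 systim_next = (u64)er32(SYSTIML) |
				  ((u64)er32(SYSTIMH) << 32);

		if (systim_next - systim < E1000_82574_SYSTIM_EPSILON)
			return systim_next;
		systim = systim_next;
	}
	return systim;
}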
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
@@ -472,26 +468,25 @@ enum latency_range {
extern char e1000e_driver_name[];
extern const char e1000e_driver_version[];
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter, bool reset);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
@@ -508,8 +503,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -536,7 +531,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
@@ -580,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time. Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value. Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
- s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
- while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
-
- return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
- if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- __ew32_prepare(hw);
-
- writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d14c8f53384..815e26c6d34 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for e1000 */
@@ -111,6 +104,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
+ E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
};
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -165,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
@@ -175,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
+
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
speed = SPEED_1000;
@@ -332,7 +327,7 @@ static int e1000_set_settings(struct net_device *netdev,
/* reset the link */
if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
} else {
e1000e_reset(adapter);
@@ -380,7 +375,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
} else {
e1000e_reset(adapter);
@@ -726,7 +721,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
pm_runtime_get_sync(netdev->dev.parent);
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
* ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
@@ -789,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
reg + (offset << 2), val,
(test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 val;
+
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -924,15 +920,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
- if (i == 7)
+ if (i == 1)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
- if (i == 10)
+ if (i == 4)
mask |= (1 << 30);
+ /* RAR[1-6] owned by management engine - skipping */
+ if (i > 0)
+ i += 6;
}
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
0xFFFFFFFF);
+ /* reset index to actual value */
+ if ((mac->type == e1000_pch2lan) && (i > 6))
+ i -= 6;
}
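To make the pch2lan branch above concrete: with E1000_PCH2_RAR_ENTRIES reduced to 5 (RAR[0] plus SHRA[0-3], see the ich8lan.h hunk below), the loop now produces the following mapping (illustrative summary, derived from the code above):

/*
 * loop i -> tested entry             notes
 *   0       RAR[0]  (array idx 0)
 *   1       SHRA[0] (array idx 7)    mask &= 0xFFF4FFFF
 *   2       SHRA[1] (array idx 8)
 *   3       SHRA[2] (array idx 9)
 *   4       SHRA[3] (array idx 10)   mask |= (1 << 30)
 *
 * REG_PATTERN_TEST_ARRAY(E1000_RA, (idx << 1) + 1, ...) exercises the
 * high dword (RAH/SHRAH) of each entry; i is restored afterwards so the
 * loop bound still reflects rar_entry_count, and the ME-owned RAR[1-6]
 * are never touched.
 */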
for (i = 0; i < mac->mta_reg_count; i++)
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b7f38435d1f..72f5475c4b9 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -476,8 +469,9 @@ struct e1000_mac_operations {
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*config_collision_dist)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8 *, u32);
+ int (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
+ u32 (*rar_get_count)(struct e1000_hw *);
};
/* When to use various PHY register access functions:
@@ -655,12 +649,20 @@ struct e1000_shadow_ram {
#define E1000_ICH8_SHADOW_RAM_WORDS 2048
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+ e1000_ulp_state_unknown,
+ e1000_ulp_state_off,
+ e1000_ulp_state_on,
+};
+
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
bool nvm_k1_enabled;
bool eee_disable;
u16 eee_lp_ability;
+ enum e1000_ulp_state ulp_state;
};
struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 42f0f671751..8894ab8ed6b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
@@ -53,6 +46,14 @@
* 82578DC Gigabit Network Connection
* 82579LM Gigabit Network Connection
* 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
*/
#include "e1000.h"
@@ -138,11 +139,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -183,7 +187,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
u16 phy_reg = 0;
u32 phy_id = 0;
- s32 ret_val;
+ s32 ret_val = 0;
u16 retry_count;
u32 mac_reg = 0;
@@ -214,11 +218,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
- hw->phy.ops.release(hw);
- ret_val = e1000_set_mdio_slow_mode_hv(hw);
- if (!ret_val)
- ret_val = e1000e_get_phy_id(hw);
- hw->phy.ops.acquire(hw);
+ if (hw->mac.type < e1000_pch_lpt) {
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+ }
if (ret_val)
return false;
@@ -239,6 +245,47 @@ out:
}
/**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+ u32 mac_reg;
+
+ /* Set Phy Config Counter to 50msec */
+ mac_reg = er32(FEXTNVM3);
+ mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+ mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+ ew32(FEXTNVM3, mac_reg);
+
+ /* Toggle LANPHYPC Value bit */
+ mac_reg = er32(CTRL);
+ mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+ usleep_range(10, 20);
+ mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, mac_reg);
+ e1e_flush();
+
+ if (hw->mac.type < e1000_pch_lpt) {
+ msleep(50);
+ } else {
+ u16 count = 20;
+
+ do {
+ usleep_range(5000, 10000);
+ } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
+
+ msleep(30);
+ }
+}
+
+/**
* e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
* @hw: pointer to the HW structure
*
@@ -247,6 +294,7 @@ out:
**/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
@@ -255,6 +303,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
e1000_gate_hw_phy_config_ich8lan(hw, true);
+ /* It is not possible to be certain of the current state of ULP
+ * so forcibly disable it.
+ */
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+ e1000_disable_ulp_lpt_lp(hw, true);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
e_dbg("Failed to initialize PHY flow\n");
@@ -300,33 +354,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
break;
}
- e_dbg("Toggling LANPHYPC\n");
-
- /* Set Phy Config Counter to 50msec */
- mac_reg = er32(FEXTNVM3);
- mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
- mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
- ew32(FEXTNVM3, mac_reg);
-
/* Toggle LANPHYPC Value bit */
- mac_reg = er32(CTRL);
- mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
- mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
- ew32(CTRL, mac_reg);
- e1e_flush();
- usleep_range(10, 20);
- mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
- ew32(CTRL, mac_reg);
- e1e_flush();
- if (hw->mac.type < e1000_pch_lpt) {
- msleep(50);
- } else {
- u16 count = 20;
- do {
- usleep_range(5000, 10000);
- } while (!(er32(CTRL_EXT) &
- E1000_CTRL_EXT_LPCD) && count--);
- usleep_range(30000, 60000);
+ e1000_toggle_lanphypc_pch_lpt(hw);
+ if (hw->mac.type >= e1000_pch_lpt) {
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -349,12 +379,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
if (!ret_val) {
+ /* Check to see if able to reset PHY. Print error if not */
+ if (hw->phy.ops.check_reset_block(hw)) {
+ e_err("Reset blocked by ME\n");
+ goto out;
+ }
+
/* Reset the PHY before any access to it. Doing so, ensures
* that the PHY is in a known good state before we read/write
* PHY registers. The generic reset is sufficient here,
* because we haven't determined the PHY type yet.
*/
ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ /* On a successful reset, possibly need to wait for the PHY
+ * to quiesce to an accessible state before returning control
+ * to the calling function. If the PHY does not quiesce, then
+ * return E1000_BLK_PHY_RESET, as this is the condition that
+ * the PHY is in.
+ */
+ ret_val = hw->phy.ops.check_reset_block(hw);
+ if (ret_val)
+ e_err("ME blocked access to PHY after reset\n");
}
out:
@@ -656,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
e1000_setup_copper_link_pch_lpt;
+ mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
}
/* Enable PCS Lock-loss workaround for ICH8 */
@@ -724,8 +774,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
* Enable/disable EEE based on setting in dev_spec structure, the duplex of
* the link and the EEE capabilities of the link partner. The LPI Control
* register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled before then; otherwise there
+ * can be link issues with some switches. Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a hardware timer that
+ * prevents LPI from being asserted too early.
**/
-static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
@@ -790,6 +846,17 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
}
}
+ if (hw->phy.type == e1000_phy_82579) {
+ ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ &data);
+ if (ret_val)
+ goto release;
+
+ data &= ~I82579_LPI_100_PLL_SHUT;
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ data);
+ }
+
/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
if (ret_val)
@@ -979,6 +1046,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
}
/**
+ * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @to_sx: boolean indicating a system power state transition to Sx
+ *
+ * When link is down, configure ULP mode to significantly reduce the power
+ * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
+ * ME firmware to start the ULP configuration. If not on an ME enabled
+ * system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+ u32 mac_reg;
+ s32 ret_val = 0;
+ u16 phy_reg;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ /* Request ME configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+
+ goto out;
+ }
+
+ if (!to_sx) {
+ int i = 0;
+
+ /* Poll up to 5 seconds for Cable Disconnected indication */
+ while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+ /* Bail if link is re-acquired */
+ if (er32(STATUS) & E1000_STATUS_LU)
+ return -E1000_ERR_PHY;
+
+ if (i++ == 100)
+ break;
+
+ msleep(50);
+ }
+ e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
+ (er32(FEXT) &
+ E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ /* Force SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Force SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* Set Inband ULP Exit, Reset to SMBus mode and
+ * Disable SMBus Release on PERST# in PHY
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ if (to_sx) {
+ if (er32(WUFC) & E1000_WUFC_LNKC)
+ phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+ phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+ } else {
+ phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+ }
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Set Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+ /* Commit ULP changes in PHY by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+release:
+ hw->phy.ops.release(hw);
+out:
+ if (ret_val)
+ e_dbg("Error in ULP enable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+ return ret_val;
+}
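A plausible call-site sketch for the enable path (the actual hook-up belongs in the suspend/shutdown flow in netdev.c, outside this hunk; the function name and error mapping here are illustrative only):

static int example_enter_ulp_on_suspend(struct e1000_adapter *adapter,
					bool to_sx)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = 0;

	/* Only LynxPoint-LP parts support ULP; the helper itself also
	 * bails out on the unsupported I217/I218 device IDs.
	 */
	if (hw->mac.type >= e1000_pch_lpt)
		ret_val = e1000_enable_ulp_lpt_lp(hw, to_sx);

	return ret_val ? -EIO : 0;
}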
+
+/**
+ * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ * @hw: pointer to the HW structure
+ * @force: boolean indicating whether or not to force disabling ULP
+ *
+ * Un-configure ULP mode when link is up, when the system transitions from
+ * Sx, or when the driver is unloaded. If on a Manageability Engine (ME) enabled
+ * system, poll for an indication from ME that ULP has been un-configured.
+ * If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ * During nominal operation, this function is called when link is acquired
+ * to disable ULP mode (force=false); otherwise, for example when unloading
+ * the driver or during Sx->S0 transitions, this is called with force=true
+ * to forcibly disable ULP.
+ */
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+ s32 ret_val = 0;
+ u32 mac_reg;
+ u16 phy_reg;
+ int i = 0;
+
+ if ((hw->mac.type < e1000_pch_lpt) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+ (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+ return 0;
+
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (force) {
+ /* Request ME un-configure ULP mode in the PHY */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ }
+
+ /* Poll up to ~100-200 msec for ME to clear ULP_CFG_DONE */
+ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
+ if (i++ == 10) {
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ }
+
+ usleep_range(10000, 20000);
+ }
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+ if (force) {
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+ ew32(H2ME, mac_reg);
+ } else {
+ /* Clear H2ME.ULP after ME ULP configuration */
+ mac_reg = er32(H2ME);
+ mac_reg &= ~E1000_H2ME_ULP;
+ ew32(H2ME, mac_reg);
+ }
+
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ if (force)
+ /* Toggle LANPHYPC Value bit */
+ e1000_toggle_lanphypc_pch_lpt(hw);
+
+ /* Unforce SMBus mode in PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+ if (ret_val) {
+ /* The MAC might be in PCIe mode, so temporarily force to
+ * SMBus mode in order to access the PHY.
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ msleep(50);
+
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ }
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ /* When ULP mode was previously entered, K1 was disabled by the
+ * hardware. Re-Enable K1 in the PHY when exiting ULP.
+ */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_PM_CTRL_K1_ENABLE;
+ e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+ /* Clear ULP enabled configuration */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg &= ~(I218_ULP_CONFIG1_IND |
+ I218_ULP_CONFIG1_STICKY_ULP |
+ I218_ULP_CONFIG1_RESET_TO_SMBUS |
+ I218_ULP_CONFIG1_WOL_HOST |
+ I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Commit ULP changes by starting auto ULP configuration */
+ phy_reg |= I218_ULP_CONFIG1_START;
+ e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+ /* Clear Disable SMBus Release on PERST# in MAC */
+ mac_reg = er32(FEXTNVM7);
+ mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+ ew32(FEXTNVM7, mac_reg);
+
+release:
+ hw->phy.ops.release(hw);
+ if (force) {
+ e1000_phy_hw_reset(hw);
+ msleep(50);
+ }
+out:
+ if (ret_val)
+ e_dbg("Error in ULP disable flow: %d\n", ret_val);
+ else
+ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+ return ret_val;
+}
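The force parameter thus encodes two distinct call patterns, summarized for reference:

/*
 *   event                              call
 *   ---------------------------------  ------------------------------------
 *   link comes back up (nominal)       e1000_disable_ulp_lpt_lp(hw, false);
 *   driver unload or Sx->S0 resume     e1000_disable_ulp_lpt_lp(hw, true);
 *
 * In the non-ME flow, only the forced variant toggles LANPHYPC and
 * hard-resets the PHY on the way out.
 */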
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
@@ -1015,14 +1329,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+ /* When connected at 10Mbps half-duplex, some parts are excessively
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if ((hw->mac.type == e1000_pch2lan) && link) {
+ if (((hw->mac.type == e1000_pch2lan) ||
+ (hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
+
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ u16 emi_addr;
+
reg = er32(TIPG);
reg &= ~E1000_TIPG_IPGT_MASK;
reg |= 0xFF;
@@ -1033,8 +1351,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val =
- e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
hw->phy.ops.release(hw);
@@ -1106,9 +1428,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
e1000e_check_downshift(hw);
/* Enable/Disable EEE after link up */
- ret_val = e1000_set_eee_pchlan(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.type > e1000_phy_82579) {
+ ret_val = e1000_set_eee_pchlan(hw);
+ if (ret_val)
+ return ret_val;
+ }
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
@@ -1313,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1346,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
* contain the MAC address but RAR[1-6] are reserved for manageability (ME).
* Use SHRA[0-3] in place of those reserved for ME.
**/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -1368,13 +1692,13 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* RAR[1-6] are owned by manageability. Skip those and program the
* next address into the SHRA register array.
*/
- if (index < (u32)(hw->mac.rar_entry_count - 6)) {
+ if (index < (u32)(hw->mac.rar_entry_count)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1391,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL(index - 1)) == rar_low) &&
(er32(SHRAH(index - 1)) == rar_high))
- return;
+ return 0;
e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
(index - 1), er32(FWSM));
@@ -1399,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ * @hw: pointer to the HW structure
+ *
+ * Get the number of receive address registers that the Host can
+ * program. SHRA[0-10] are shared between the Host and the
+ * manageability engine (ME). The ME can reserve any number of
+ * addresses, so the Host needs to be able to tell how many
+ * registers it still has access to.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+ u32 wlock_mac;
+ u32 num_entries;
+
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ switch (wlock_mac) {
+ case 0:
+ /* All SHRA[0..10] and RAR[0] available */
+ num_entries = hw->mac.rar_entry_count;
+ break;
+ case 1:
+ /* Only RAR[0] available */
+ num_entries = 1;
+ break;
+ default:
+ /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+ num_entries = wlock_mac + 1;
+ break;
+ }
+
+ return num_entries;
}
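Worked example of the WLOCK_MAC decoding above: with FWSM bits [9:7] reading 3, the Host may program RAR[0] plus SHRA[0..2], so the function returns 4. A caller sizing its unicast address list might then look like this (illustrative helper, not part of the patch):

static bool example_fits_in_rar(struct e1000_hw *hw, u32 nr_extra_addrs)
{
	/* entry 0 always holds the primary MAC address */
	return nr_extra_addrs <= hw->mac.ops.rar_get_count(hw) - 1;
}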
/**
@@ -1412,7 +1773,7 @@ out:
* contain the MAC address. SHRA[0-10] are the shared receive address
* registers that are shared between the Host and manageability engine (ME).
**/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
u32 wlock_mac;
@@ -1434,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1466,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
(er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
- return;
+ return 0;
}
}
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -1484,11 +1846,13 @@ out:
**/
static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
{
- u32 fwsm;
+ bool blocked = false;
+ int i = 0;
- fwsm = er32(FWSM);
-
- return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+ while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
+ (i++ < 10))
+ usleep_range(10000, 20000);
+ return blocked ? E1000_BLK_PHY_RESET : 0;
}
/**
@@ -2190,51 +2554,44 @@ release:
* e1000_k1_gig_workaround_lv - K1 Si workaround
* @hw: pointer to the HW structure
*
- * Workaround to set the K1 beacon duration for 82579 parts
+ * Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
+ * Disable K1 at 1000Mbps and 100Mbps.
**/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 status_reg = 0;
- u32 mac_reg;
- u16 phy_reg;
if (hw->mac.type != e1000_pch2lan)
return 0;
- /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ /* Set K1 beacon duration based on 10Mbps speed */
ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
if (ret_val)
return ret_val;
if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
== (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
- mac_reg = er32(FEXTNVM4);
- mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
- if (ret_val)
- return ret_val;
-
- if (status_reg & HV_M_STATUS_SPEED_1000) {
+ if (status_reg &
+ (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
u16 pm_phy_reg;
- mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
- phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
- /* LV 1G Packet drop issue wa */
+ /* LV 1G/100 packet drop issue workaround */
ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
if (ret_val)
return ret_val;
- pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+ pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
if (ret_val)
return ret_val;
} else {
+ u32 mac_reg;
+
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
- phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ ew32(FEXTNVM4, mac_reg);
}
- ew32(FEXTNVM4, mac_reg);
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
return ret_val;
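For quick reference, the rewritten workaround reduces to this decision table:

/*
 * e1000_k1_workaround_lv(), 82579 (pch2lan) only, applied once link is
 * up and autonegotiation is complete:
 *
 *   1000 or 100 Mbps : clear HV_PM_CTRL_K1_ENABLE in the PHY (no K1)
 *   10 Mbps          : keep K1; set the FEXTNVM4 beacon duration to
 *                      E1000_FEXTNVM4_BEACON_DURATION_16USEC
 */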
@@ -4660,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations ich8_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 217090df33e..5515126c81c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_ICH8LAN_H_
#define _E1000E_ICH8LAN_H_
@@ -65,11 +58,16 @@
#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */
/* Shared Receive Address Registers */
#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+#define E1000_H2ME 0x05B50 /* Host to ME */
+#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
+
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
@@ -82,6 +80,9 @@
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004
+
#define E1000_FEXTNVM_SW_CONFIG 1
#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
@@ -95,10 +96,12 @@
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
@@ -161,6 +164,16 @@
#define CV_SMB_CTRL PHY_REG(769, 23)
#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1 PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
+
/* SMBus Address Phy Register */
#define HV_SMB_ADDR PHY_REG(768, 26)
#define HV_SMB_ADDR_MASK 0x007F
@@ -195,6 +208,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_ENABLE 0x4000
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
@@ -218,16 +232,19 @@
#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG 0xB20C /* Receive configuration */
#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
@@ -268,4 +285,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 2480c109187..8c386f3a15e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
@@ -218,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
return 0;
}
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+ return hw->mac.rar_entry_count;
+}
+
/**
* e1000e_rar_set_generic - Set receive address register
* @hw: pointer to the HW structure
@@ -227,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -251,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
+
+ return 0;
}
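For context, the address-packing lines elided from this hunk follow the longstanding e1000 layout; a hedged reconstruction of the fragment preceding the flushes above:

	/* low dword: first four octets of the MAC address */
	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
	/* high word: last two octets */
	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* a non-zero address must be marked valid */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;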
/**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index a61fee404eb..0513d90cdee 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_MAC_H_
#define _E1000E_MAC_H_
@@ -68,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e4b0f1ef92f..cb37ff1f132 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 326897c29ea..a8c27f98f7b 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_MANAGE_H_
#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 4ef786775ac..201cc93f362 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -131,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
};
/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time. Normally, this is handled in
+ * h/w by an arbiter, but some parts have a bug that acknowledges Host
+ * accesses later than it should, which could result in the register having
+ * an incorrect value. Work around this by checking the FWSM register, which
+ * has bit 24 set while ME is accessing MAC CSR registers; wait if it is set
+ * and try again a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+ s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+ while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+ udelay(50);
+
+ return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ __ew32_prepare(hw);
+
+ writel(val, hw->hw_addr + reg);
+}
+
+/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
* @reginfo: pointer to the register info table
@@ -606,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -622,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
+
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -885,7 +910,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
struct sk_buff *skb)
{
if (netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rss);
+ skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}
/**
@@ -1097,8 +1122,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
adapter->tx_hang_recheck = true;
return;
}
- /* Real hang detected */
adapter->tx_hang_recheck = false;
+
+ if (er32(TDH(0)) == er32(TDT(0))) {
+ e_dbg("false hang detected, ignoring\n");
+ return;
+ }
+
+ /* Real hang detected */
netif_stop_queue(netdev);
e1e_rphy(hw, MII_BMSR, &phy_status);
@@ -1128,6 +1159,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
phy_status, phy_1000t_status, phy_ext_status, pci_status);
+ e1000e_dump(adapter);
+
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -1147,9 +1180,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
tx_hwtstamp_work);
struct e1000_hw *hw = &adapter->hw;
- if (!adapter->tx_hwtstamp_skb)
- return;
-
if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;
@@ -1162,6 +1192,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
adapter->tx_hwtstamp_skb = NULL;
+ } else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ + adapter->tx_timeout_factor * HZ)) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ e_warn("clearing Tx timestamp hang\n");
} else {
/* reschedule to check later */
schedule_work(&adapter->tx_hwtstamp_work);
@@ -1194,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+
rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1701,7 +1738,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
writel(0, rx_ring->head);
- if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, 0);
else
writel(0, rx_ring->tail);
@@ -1749,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
@@ -1956,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
+
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
@@ -2038,13 +2077,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
msix_entry),
GFP_KERNEL);
if (adapter->msix_entries) {
+ struct e1000_adapter *a = adapter;
+
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- adapter->num_vectors);
- if (err == 0)
+ err = pci_enable_msix_range(a->pdev,
+ a->msix_entries,
+ a->num_vectors,
+ a->num_vectors);
+ if (err > 0)
return;
}
/* MSI-X failed, so fall through and try MSI */
@@ -2197,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
int i;
+
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -2402,7 +2445,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
tx_ring->next_to_clean = 0;
writel(0, tx_ring->head);
- if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring, 0);
else
writel(0, tx_ring->tail);
@@ -2894,7 +2937,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
u64 tdba;
- u32 tdlen, tarc;
+ u32 tdlen, tctl, tarc;
/* Setup the HW Tx Head and Tail descriptor pointers */
tdba = tx_ring->dma;
@@ -2914,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
+
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
/* set up some performance related parameters to encourage the
@@ -2931,6 +2975,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* erratum work around: set txdctl the same for both queues */
ew32(TXDCTL(1), er32(TXDCTL(0)));
+ /* Program the Transmit Control Register */
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_CT;
+ tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+ (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
/* set the speed mode bit, we'll clear it if we're not at
@@ -2961,6 +3011,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* enable Report Status bit */
adapter->txd_cmd |= E1000_TXD_CMD_RS;
+ ew32(TCTL, tctl);
+
hw->mac.ops.config_collision_dist(hw);
}
@@ -2976,11 +3028,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 rctl, rfctl;
u32 pages = 0;
- /* Workaround Si errata on PCHx - configure jumbo frame flow */
- if ((hw->mac.type >= e1000_pch2lan) &&
- (adapter->netdev->mtu > ETH_DATA_LEN) &&
- e1000_lv_jumbo_workaround_ich8lan(hw, true))
- e_dbg("failed to enable jumbo frame workaround mode\n");
+ /* Work around Si errata on PCHx - configure jumbo frame flow.
+ * If jumbo frames are not set, program the related MAC/PHY registers
+ * to h/w defaults.
+ */
+ if (hw->mac.type >= e1000_pch2lan) {
+ s32 ret_val;
+
+ if (adapter->netdev->mtu > ETH_DATA_LEN)
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+ else
+ ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+ if (ret_val)
+ e_dbg("failed to enable|disable jumbo frame workaround mode\n");
+ }
/* Program MC offset vector base */
rctl = er32(RCTL);
@@ -3214,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
+
ew32(RXDCTL(0), rxdctl | 0x3);
}
@@ -3278,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.rar_entry_count;
+ unsigned int rar_entries;
int count = 0;
+ rar_entries = hw->mac.ops.rar_get_count(hw);
+
/* save a rar entry for our hardware address */
rar_entries--;
@@ -3299,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
+ int rval;
+
if (!rar_entries)
break;
- hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (rval < 0)
+ return -ENOMEM;
count++;
}
}
@@ -3331,6 +3400,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
+ if (pm_runtime_suspended(netdev->dev.parent))
+ return;
+
/* Check for Promiscuous and All Multicast modes */
rctl = er32(RCTL);
@@ -3482,10 +3554,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
+ struct hwtstamp_config *config)
{
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config *config = &adapter->hwtstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 rxmtrl = 0;
@@ -3586,6 +3658,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
return -ERANGE;
}
+ adapter->hwtstamp_config = *config;
+
/* enable/disable Tx h/w time stamping */
regval = er32(TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -3689,10 +3763,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
*/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
- /* WoL is enabled */
- if (adapter->wol)
- return;
-
if (adapter->hw.phy.ops.power_down)
adapter->hw.phy.ops.power_down(&adapter->hw);
}
@@ -3874,7 +3944,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
/* initialize systim and reset the ns time counter */
- e1000e_config_hwtstamp(adapter);
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
/* Set EEE advertisement as appropriate */
if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -3909,10 +3979,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
if (!netif_running(adapter->netdev) &&
- !test_bit(__E1000_TESTING, &adapter->state)) {
+ !test_bit(__E1000_TESTING, &adapter->state))
e1000_power_down_phy(adapter);
- return;
- }
e1000_get_phy_info(hw);
@@ -3979,7 +4047,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
static void e1000e_update_stats(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter)
+/**
+ * e1000e_down - quiesce the device and optionally reset the hardware
+ * @adapter: board private structure
+ * @reset: boolean flag to reset the hardware or not
+ */
+void e1000e_down(struct e1000_adapter *adapter, bool reset)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -4033,12 +4106,8 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (!pci_channel_offline(adapter->pdev))
+ if (reset && !pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
-
- /* TODO: for power management, we could drop the link and
- * pci_disable_device here.
- */
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4046,7 +4115,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
might_sleep();
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000e_up(adapter);
clear_bit(__E1000_RESETTING, &adapter->state);
}
@@ -4060,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- cycle_t systim;
+ cycle_t systim, systim_next;
/* latch SYSTIMH on read of SYSTIML */
systim = (cycle_t)er32(SYSTIML);
systim |= (cycle_t)er32(SYSTIMH) << 32;
+ if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
+ u64 incvalue, time_delta, rem, temp;
+ int i;
+
+ /* Errata for 82574/82583: possible bad bits read from SYSTIMH/L;
+ * check that the time is incrementing at a reasonable rate and
+ * is a multiple of incvalue
+ */
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ rem = do_div(temp, incvalue);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
+ (rem == 0))
+ break;
+ }
+ }
return systim;
}
@@ -4324,7 +4418,6 @@ static int e1000_open(struct net_device *netdev)
adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
- adapter->idle_check = true;
hw->mac.get_link_status = true;
pm_runtime_put(&pdev->dev);
@@ -4374,14 +4467,15 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
e1000_free_irq(adapter);
+
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
}
napi_disable(&adapter->napi);
- e1000_power_down_phy(adapter);
-
e1000e_free_tx_resources(adapter->tx_ring);
e1000e_free_rx_resources(adapter->rx_ring);
@@ -4458,11 +4552,16 @@ static void e1000e_update_phy_task(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
update_phy_task);
+ struct e1000_hw *hw = &adapter->hw;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- e1000_get_phy_info(&adapter->hw);
+ e1000_get_phy_info(hw);
+
+ /* Enable EEE on 82579 after link up */
+ if (hw->phy.type >= e1000_phy_82579)
+ e1000_set_eee_pchlan(hw);
}
/**
@@ -4665,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
+
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
@@ -4778,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
(adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl | E1000_RCTL_EN);
adapter->flags &= ~FLAG_RESTART_NOW;
}
@@ -4797,6 +4898,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
if (adapter->phy_hang_count > 1) {
adapter->phy_hang_count = 0;
+ e_dbg("PHY appears hung - resetting\n");
schedule_work(&adapter->reset_task);
}
}
@@ -4899,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
!txb2b) {
u32 tarc0;
+
tarc0 = er32(TARC(0));
tarc0 &= ~SPEED_MODE_BIT;
ew32(TARC(0), tarc0);
@@ -4955,15 +5058,11 @@ static void e1000_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- /* The link is lost so the controller stops DMA.
- * If there is queued Tx work that cannot be done
- * or if on an 8000ES2LAN which requires a Rx packet
- * buffer work-around on link down event, reset the
- * controller to flush the Tx/Rx packet buffers.
- * (Do the reset outside of interrupt context).
+ /* 8000ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
*/
- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
@@ -4986,6 +5085,15 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
+ /* If the link is lost the controller stops DMA, but
+ * if there is queued Tx work it cannot be done. So
+ * reset the controller to flush the Tx packet buffers.
+ */
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
+
+ /* If reset is necessary, do it outside of interrupt context. */
if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
@@ -5064,16 +5172,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
u32 cmd_length = 0;
u16 ipcse = 0, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
+ int err;
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
@@ -5136,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
__be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ return false;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5181,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return true;
}
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -5544,6 +5650,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
adapter->tx_hwtstamp_skb = skb_get(skb);
+ adapter->tx_hwtstamp_start = jiffies;
schedule_work(&adapter->tx_hwtstamp_work);
} else {
skb_tx_timestamp(skb);
@@ -5652,7 +5759,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -5682,8 +5789,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (netif_running(netdev))
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
@@ -5709,6 +5819,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
else
e1000e_reset(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -5788,7 +5900,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
* specified. Matching the kind of event packet is not supported, with the
* exception of "all V2 events regardless of level 2 or 4".
**/
-static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -5797,14 +5909,10 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- adapter->hwtstamp_config = config;
-
- ret_val = e1000e_config_hwtstamp(adapter);
+ ret_val = e1000e_config_hwtstamp(adapter, &config);
if (ret_val)
return ret_val;
- config = adapter->hwtstamp_config;
-
switch (config.rx_filter) {
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@@ -5827,6 +5935,14 @@ static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
+static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
+}
+
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5835,7 +5951,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
case SIOCSHWTSTAMP:
- return e1000e_hwtstamp_ioctl(netdev, ifr);
+ return e1000e_hwtstamp_set(netdev, ifr);
+ case SIOCGHWTSTAMP:
+ return e1000e_hwtstamp_get(netdev, ifr);
default:
return -EOPNOTSUPP;
}
@@ -5844,7 +5962,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
struct e1000_hw *hw = &adapter->hw;
- u32 i, mac_reg;
+ u32 i, mac_reg, wuc;
u16 phy_reg, wuc_enable;
int retval;
@@ -5891,13 +6009,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg |= BM_RCTL_RFCE;
hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+ wuc = E1000_WUC_PME_EN;
+ if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
+ wuc |= E1000_WUC_APME;
+
/* enable PHY wakeup in MAC register */
ew32(WUFC, wufc);
- ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+ ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
+ E1000_WUC_PME_STATUS | wuc));
/* configure and enable PHY wakeup in PHY registers */
hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
- hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+ hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
/* activate PHY wakeup */
wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
@@ -5910,15 +6033,10 @@ release:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
- int retval = 0;
netif_device_detach(netdev);
@@ -5929,11 +6047,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
usleep_range(10000, 20000);
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ /* Quiesce the device without resetting the hardware */
+ e1000e_down(adapter, false);
e1000_free_irq(adapter);
}
e1000e_reset_interrupt_capability(adapter);
+ /* Allow time for pending master requests to run */
+ e1000e_disable_pcie_master(&adapter->hw);
+
+ return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl, ctrl_ext, rctl, status;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ int retval = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5964,12 +6100,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
ew32(CTRL_EXT, ctrl_ext);
}
+ if (!runtime)
+ e1000e_power_up_phy(adapter);
+
if (adapter->flags & FLAG_IS_ICH)
e1000_suspend_workarounds_ich8lan(&adapter->hw);
- /* Allow time for pending master requests to run */
- e1000e_disable_pcie_master(&adapter->hw);
-
if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
/* enable wakeup by the PHY */
retval = e1000_init_phy_wakeup(adapter, wufc);
@@ -5983,10 +6119,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
} else {
ew32(WUC, 0);
ew32(WUFC, 0);
+
+ e1000_power_down_phy(adapter);
}
- if (adapter->hw.phy.type == e1000_phy_igp_3)
+ if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ /* ULP does not support wake from unicast, multicast
+ * or broadcast.
+ */
+ retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+
+ if (retval)
+ return retval;
+ }
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -6094,18 +6243,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
}
#ifdef CONFIG_PM
-static bool e1000e_pm_ready(struct e1000_adapter *adapter)
-{
- return !!adapter->tx_ring->buffer_info;
-}
-
static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u16 aspm_disable_flag = 0;
- u32 err;
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6116,13 +6259,6 @@ static int __e1000_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- e1000e_set_interrupt_capability(adapter);
- if (netif_running(netdev)) {
- err = e1000_request_irq(adapter);
- if (err)
- return err;
- }
-
if (hw->mac.type >= e1000_pch2lan)
e1000_resume_workarounds_pchlan(&adapter->hw);
@@ -6145,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
+
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -6161,11 +6298,6 @@ static int __e1000_resume(struct pci_dev *pdev)
e1000_init_manageability_pt(adapter);
- if (netif_running(netdev))
- e1000e_up(adapter);
-
- netif_device_attach(netdev);
-
/* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
@@ -6177,74 +6309,110 @@ static int __e1000_resume(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int e1000_suspend(struct device *dev)
+static int e1000e_pm_thaw(struct device *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ e1000e_set_interrupt_capability(adapter);
+ if (netif_running(netdev)) {
+ u32 err = e1000_request_irq(adapter);
+
+ if (err)
+ return err;
+
+ e1000e_up(adapter);
+ }
+
+ netif_device_attach(netdev);
+
+ return 0;
+}
+
+static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ e1000e_pm_freeze(dev);
+
return __e1000_shutdown(pdev, false);
}
-static int e1000_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (e1000e_pm_ready(adapter))
- adapter->idle_check = true;
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
- return __e1000_resume(pdev);
+ return e1000e_pm_thaw(dev);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
-static int e1000_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (!e1000e_pm_ready(adapter))
- return 0;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
- return __e1000_shutdown(pdev, true);
+ return -EBUSY;
}
-static int e1000_idle(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ int rc;
- if (!e1000e_pm_ready(adapter))
- return 0;
+ rc = __e1000_resume(pdev);
+ if (rc)
+ return rc;
- if (adapter->idle_check) {
- adapter->idle_check = false;
- if (!e1000e_has_link(adapter))
- pm_schedule_suspend(dev, MSEC_PER_SEC);
- }
+ if (netdev->flags & IFF_UP)
+ rc = e1000e_up(adapter);
- return -EBUSY;
+ return rc;
}
-static int e1000_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (!e1000e_pm_ready(adapter))
- return 0;
+ if (netdev->flags & IFF_UP) {
+ int count = E1000_CHECK_RESET_COUNT;
+
+ while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+ usleep_range(10000, 20000);
+
+ WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- adapter->idle_check = !dev->power.runtime_auto;
- return __e1000_resume(pdev);
+ /* Down the device without resetting the hardware */
+ e1000e_down(adapter, false);
+ }
+
+ if (__e1000_shutdown(pdev, true)) {
+ e1000e_pm_runtime_resume(dev);
+ return -EBUSY;
+ }
+
+ return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
+ e1000e_pm_freeze(&pdev->dev);
+
__e1000_shutdown(pdev, false);
}
@@ -6330,7 +6498,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
- e1000e_down(adapter);
+ e1000e_down(adapter, true);
pci_disable_device(pdev);
/* Request a slot reset. */
@@ -6342,7 +6510,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
+ * resembles the first-half of the e1000e_pm_resume routine.
*/
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
@@ -6389,7 +6557,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
+ * second-half of the e1000e_pm_resume routine.
*/
static void e1000_io_resume(struct pci_dev *pdev)
{
@@ -6553,21 +6721,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
@@ -6900,9 +7062,6 @@ static void e1000_remove(struct pci_dev *pdev)
}
}
- if (!(netdev->flags & IFF_UP))
- e1000_power_down_phy(adapter);
-
/* Don't lie to e1000_close() down the road. */
if (!down)
clear_bit(__E1000_DOWN, &adapter->state);
@@ -6941,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7023,13 +7182,18 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
- e1000_idle)
-};
+#ifdef CONFIG_PM_SLEEP
+ .suspend = e1000e_pm_suspend,
+ .resume = e1000e_pm_resume,
+ .freeze = e1000e_pm_freeze,
+ .thaw = e1000e_pm_thaw,
+ .poweroff = e1000e_pm_suspend,
+ .restore = e1000e_pm_resume,
#endif
+ SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
+};
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
@@ -7037,11 +7201,9 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
-#ifdef CONFIG_PM
.driver = {
.pm = &e1000_pm_ops,
},
-#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
@@ -7055,9 +7217,10 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
+
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
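/*
 * Editor's sketch (not part of the patch above): the new __ew32_prepare()/
 * __ew32() pair added to netdev.c polls FWSM bit 24 a bounded number of
 * times before a MAC CSR write, so a Manageability Engine access cannot
 * race the Host write. The standalone model below illustrates the same
 * bounded poll-then-write pattern; the register variables, FW_BUSY_BIT and
 * MAX_POLLS are placeholders, not the real e1000e layout or values.
 */
#include <stdint.h>
#include <stdio.h>

#define FW_BUSY_BIT	(1u << 24)	/* modelled on FWSM bit 24 */
#define MAX_POLLS	2048		/* bounded wait, as in the driver */

static volatile uint32_t fake_fwsm;	/* stand-in for the FWSM register */
static volatile uint32_t fake_csr;	/* stand-in for the target MAC CSR */

/* Return the number of polls remaining; 0 means the busy bit never cleared. */
static int prepare_csr_write(void)
{
	int i = MAX_POLLS;

	while ((fake_fwsm & FW_BUSY_BIT) && --i)
		;	/* the driver delays ~50us per iteration here */

	return i;
}

static void csr_write(uint32_t val)
{
	/* warn if firmware stayed busy, then write, as the driver does */
	if (!prepare_csr_write())
		fprintf(stderr, "firmware busy too long, write may race\n");

	fake_csr = val;
}

int main(void)
{
	csr_write(0x1);
	printf("csr = 0x%x\n", fake_csr);
	return 0;
}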
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index d70a03906ac..b1f212b7baf 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
@@ -405,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 45fc6956162..342bf69efab 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_NVM_H_
#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index c16bd75b6ca..aa1923f7ebd 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
+ case 2:
+ dev_info(&adapter->pdev->dev,
+ "%s Invalid mode - setting default\n",
+ opt.name);
+ adapter->itr_setting = opt.def;
+ /* fall-through */
case 3:
dev_info(&adapter->pdev->dev,
"%s set to dynamic conservative mode\n",
@@ -437,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
+
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
@@ -458,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
+
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -474,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
+
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -496,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c..b2005e13fb0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000.h"
@@ -1757,19 +1750,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
- if (ret_val)
+ if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval / 1000);
+ msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
@@ -2899,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
(hw->phy.addr == 2) &&
!(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
u16 data2 = 0x7EFF;
+
ret_val = e1000_access_phy_debug_regs_hv(hw,
(1 << 6) | 0x3,
&data2, false);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index f4f71b9991e..537d2780b40 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_PHY_H_
#define _E1000E_PHY_H_
@@ -171,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
#define HV_M_STATUS_SPEED_MASK 0x0300
#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_SPEED_100 0x0100
#define HV_M_STATUS_LINK_UP 0x0040
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 065f8c80d4f..fb1a914a3ad 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* PTP 1588 Hardware Clock (PHC)
* Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
@@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ptp_clock_info);
struct e1000_hw *hw = &adapter->hw;
bool neg_adj = false;
+ unsigned long flags;
u64 adjustment;
u32 timinca, incvalue;
s32 ret_val;
@@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
if (ret_val)
return ret_val;
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
adjustment = incvalue;
@@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
return 0;
}
@@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
+ .n_pins = 0,
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
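/*
 * Editor's sketch (not part of the patch above): the adjfreq change in
 * ptp.c wraps the TIMINCA read-modify-write in systim_lock so it cannot
 * interleave with readers of the system time registers. The model below
 * shows the same guarded read-modify-write with a pthread mutex standing
 * in for spin_lock_irqsave(); INCVALUE_MASK and the scaling here are
 * placeholders rather than the exact e1000e arithmetic.
 */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t systim_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t timinca;		/* stand-in for the TIMINCA register */

#define INCVALUE_MASK	0x00ffffffu	/* placeholder increment-value mask */

/* Scale the increment value by |ppb| parts per billion while holding the
 * same lock the time readers take, so no reader sees a half-updated value.
 */
static void adjust_frequency(int32_t ppb)
{
	uint64_t incvalue, adjustment;

	pthread_mutex_lock(&systim_lock);

	incvalue = timinca & INCVALUE_MASK;
	adjustment = incvalue * (uint64_t)(ppb < 0 ? -(int64_t)ppb : ppb);
	adjustment /= 1000000000ULL;

	if (ppb < 0)
		incvalue -= adjustment;
	else
		incvalue += adjustment;

	timinca = (timinca & ~INCVALUE_MASK) |
		  (uint32_t)(incvalue & INCVALUE_MASK);

	pthread_mutex_unlock(&systim_lock);
}

int main(void)
{
	timinca = 0x00800000u;		/* arbitrary starting increment */
	adjust_frequency(-1000);	/* slow the modelled clock by 1000 ppb */
	return 0;
}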
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index a7e6a3e3725..ea235bbe50d 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
- Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- Linux NICS <linux.nics@intel.com>
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000E_REGS_H_
#define _E1000E_REGS_H_
@@ -39,6 +32,7 @@
#define E1000_SCTL 0x00024 /* SerDes Control - RW */
#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXT 0x0002C /* Future Extended - RW */
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 479b2c4e552..d9eb80acac4 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
@@ -41,4 +40,7 @@ i40e-objs := i40e_main.o \
i40e_debugfs.o \
i40e_diag.o \
i40e_txrx.o \
+ i40e_ptp.o \
i40e_virtchnl_pf.o
+
+i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index b5252eb8a6c..65985846345 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -29,7 +28,7 @@
#define _I40E_H_
#include <net/tcp.h>
-#include <linux/init.h>
+#include <net/udp.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -46,16 +45,19 @@
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
-#include <linux/version.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
+#include "i40e_dcb.h"
/* Useful i40e defaults */
#define I40E_BASE_PF_SEID 16
@@ -64,15 +66,17 @@
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
-#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_MAX_REGISTER 0x800000
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */
#define I40E_FDIR_RING 0
#define I40E_FDIR_RING_COUNT 32
#define I40E_MAX_AQ_BUF_SIZE 4096
@@ -82,20 +86,18 @@
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_MID_SHIFT 4
-#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
+#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+/* The values in here are decimal coded as hex, as is the case in the NVM map */
+#define I40E_CURRENT_NVM_VERSION_HI 0x2
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
+
/* magic for getting defines into strings */
#define STRINGIFY(foo) #foo
#define XSTRINGIFY(bar) STRINGIFY(bar)
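
Editor's note (illustration): the two-level pair is the standard trick for stringizing a macro's value rather than its name -- '#' stringizes its argument before expansion, so the extra level of indirection forces expansion first. With a hypothetical DRV_BUILD macro:

	#define DRV_BUILD 42
	/* STRINGIFY(DRV_BUILD)  -> "DRV_BUILD"  (# sees the unexpanded token) */
	/* XSTRINGIFY(DRV_BUILD) -> "42"         (expanded, then stringized)   */
	static const char build_str[] = XSTRINGIFY(DRV_BUILD);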
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
#define I40E_RX_DESC(R, i) \
((ring_is_16byte_desc_enabled(R)) \
? (union i40e_32byte_rx_desc *) \
@@ -128,7 +130,10 @@ enum i40e_state_t {
__I40E_PF_RESET_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_EMP_RESET_REQUESTED,
__I40E_FILTER_OVERFLOW_PROMISC,
+ __I40E_SUSPENDED,
+ __I40E_BAD_EEPROM,
};
enum i40e_interrupt_policy {
@@ -145,8 +150,33 @@ struct i40e_lump_tracking {
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
-#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
-struct i40e_fdir_data {
+#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
+#define I40E_FDIR_BUFFER_FULL_MARGIN 10
+#define I40E_FDIR_BUFFER_HEAD_ROOM 200
+
+enum i40e_fd_stat_idx {
+ I40E_FD_STAT_ATR,
+ I40E_FD_STAT_SB,
+ I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+
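Editor's note (worked example): with the enum above, I40E_FD_STAT_PF_COUNT is 2, so each PF owns a contiguous (ATR, sideband) pair in the shared counter space. For pf_id == 3:

	/* I40E_FD_STAT_PF_IDX(3)  == 3 * 2      == 6 */
	/* I40E_FD_ATR_STAT_IDX(3) == 6 + ATR(0) == 6 */
	/* I40E_FD_SB_STAT_IDX(3)  == 6 + SB(1)  == 7 */
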
+struct i40e_fdir_filter {
+ struct hlist_node fdir_node;
+ /* filter input set */
+ u8 flow_type;
+ u8 ip4_proto;
+ /* TX packet view of src and dst */
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 sctp_v_tag;
+ /* filter control */
u16 q_index;
u8 flex_off;
u8 pctype;
@@ -155,9 +185,10 @@ struct i40e_fdir_data {
u8 fd_status;
u16 cnt_index;
u32 fd_id;
- u8 *raw_packet;
};
+#define I40E_ETH_P_LLDP 0x88cc
+
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
@@ -183,23 +214,34 @@ struct i40e_pf {
unsigned long state;
unsigned long link_check_timeout;
struct msix_entry *msix_entries;
- u16 num_msix_entries;
bool fc_autoneg_status;
u16 eeprom_version;
- u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
+ u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
 u16 num_req_vfs; /* num vfs requested for this pf */
u16 num_vf_qps; /* num queue pairs per vf */
- u16 num_tc_qps; /* num queue pairs per TC */
u16 num_lan_qps; /* num lan queues this pf has set up */
u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
+ bool wol_en;
+
+ struct hlist_head fdir_filter_list;
+ u16 fdir_pf_active_filters;
+ u16 fd_sb_cnt_idx;
+ u16 fd_atr_cnt_idx;
+#ifdef CONFIG_I40E_VXLAN
+ __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
+ u16 pending_vxlan_bitmap;
+
+#endif
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
@@ -217,24 +259,28 @@ struct i40e_pf {
#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
-#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
-#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
-#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
-#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
-#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
-#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
-#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
-#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
-#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
-#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
-#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
-#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
-#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
-
- u16 num_tx_queues;
- u16 num_rx_queues;
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 8)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 9)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 13)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 14)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 15)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 18)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 19)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_FD_SB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FD_ATR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_PTP (u64)(1 << 25)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 26)
+#ifdef CONFIG_I40E_VXLAN
+#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
+#endif
+#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
+
+ /* tracks features that get auto disabled by errors */
+ u64 auto_disable_flags;
bool stat_offsets_loaded;
struct i40e_hw_port_stats stats;
@@ -248,6 +294,7 @@ struct i40e_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u16 sw_int_count; /* SW interrupt count */
struct mutex switch_mutex;
u16 lan_vsi; /* our default LAN VSI */
@@ -265,12 +312,13 @@ struct i40e_pf {
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct i40e_aqc_get_switch_config_data *sw_config;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
#endif /* CONFIG_DEBUG_FS */
+ u16 instance; /* A unique number per i40e_pf instance in the system */
+
/* sr-iov config info */
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
@@ -288,6 +336,18 @@ struct i40e_pf {
u32 fcoe_hmc_filt_num;
u32 fcoe_hmc_cntx_num;
struct i40e_filter_control_settings filter_settings;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_caps;
+ struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
+ unsigned long last_rx_ptp_check;
+ spinlock_t tmreg_lock; /* Used to protect the device time registers. */
+ u64 ptp_base_adj;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
+ bool ptp_tx;
+ bool ptp_rx;
};
struct i40e_mac_filter {
@@ -347,9 +407,9 @@ struct i40e_vsi {
u32 rx_buf_failed;
u32 rx_page_failed;
- /* These are arrays of rings, allocated at run-time */
- struct i40e_ring *rx_rings;
- struct i40e_ring *tx_rings;
+ /* These are containers of ring pointers, allocated at run-time */
+ struct i40e_ring **rx_rings;
+ struct i40e_ring **tx_rings;
u16 work_limit;
/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,9 +426,10 @@ struct i40e_vsi {
u8 dtype;
/* List of q_vectors allocated to this VSI */
- struct i40e_q_vector *q_vectors;
+ struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
+ bool irqs_ready;
u16 seid; /* HW index of this VSI (absolute index) */
u16 id; /* VSI number */
@@ -422,8 +483,9 @@ struct i40e_q_vector {
u8 num_ringpairs; /* total number of ring pairs in vector */
- char name[IFNAMSIZ + 9];
cpumask_t affinity_mask;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;
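
Editor's note (illustration): the i40e_vsi hunk above switches rx_rings/tx_rings (and q_vectors) from contiguous arrays of structures to arrays of pointers, so a single ring or vector can be reallocated or RCU-freed without disturbing its neighbours. A sketch of the allocation shape, with error handling trimmed and the queue-count name assumed:

	vsi->tx_rings = kcalloc(num_queue_pairs,	/* assumed count field */
				sizeof(struct i40e_ring *), GFP_KERNEL);
	for (i = 0; i < num_queue_pairs; i++)
		vsi->tx_rings[i] = kzalloc(sizeof(struct i40e_ring),
					   GFP_KERNEL);
	/* readers holding only RCU can keep using an old i40e_q_vector via
	 * its rcu_head while the pointer slot is repointed
	 */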
/* lan device */
@@ -441,15 +503,13 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ "f%d.%d a%d.%d n%02x.%02x e%08x",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
- >> I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
- >> I40E_NVM_VERSION_MID_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
- >> I40E_NVM_VERSION_LO_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+ I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+ I40E_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack);
return buf;
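
Editor's note (worked example): the format change drops the MID field and prints the version as two hex bytes because, per the comment added in i40e.h above, the NVM version is decimal coded as hex. For hw->nvm.version == 0x2240 (the current 2.40 map):

	/* hi = (0x2240 & 0xf000) >> 12 = 0x02 -> "02" */
	/* lo = (0x2240 & 0x00ff) >>  0 = 0x40 -> "40" */
	/* printed as "n02.40" by the "n%02x.%02x" specifier */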
@@ -490,11 +550,21 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
}
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the pf struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -502,16 +572,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-/* needed by i40e_main.c */
-void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
- struct i40e_ring *tx_ring);
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add);
-
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add);
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
+int i40e_get_current_fd_count(struct i40e_pf *pf);
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -524,10 +591,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
struct i40e_vsi *start_vsi);
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);
i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
@@ -544,7 +614,10 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -554,5 +627,21 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
-
+#ifdef CONFIG_I40E_DCB
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg);
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
+void i40e_dcbnl_setup(struct i40e_vsi *vsi);
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg);
+#endif /* CONFIG_I40E_DCB */
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi);
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
+void i40e_ptp_set_increment(struct i40e_pf *pf);
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_init(struct i40e_pf *pf);
+void i40e_ptp_stop(struct i40e_pf *pf);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index cfef7fc32cd..7a027499fc5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,6 +30,18 @@
#include "i40e_adminq.h"
#include "i40e_prototype.h"
+static void i40e_resume_aq(struct i40e_hw *hw);
+
+/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -43,13 +54,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
if (hw->mac.type == I40E_MAC_VF) {
hw->aq.asq.tail = I40E_VF_ATQT1;
hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
hw->aq.arq.tail = I40E_VF_ARQT1;
hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
} else {
hw->aq.asq.tail = I40E_PF_ATQT;
hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
hw->aq.arq.tail = I40E_PF_ARQT;
hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
}
}
@@ -60,9 +75,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
@@ -70,21 +84,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
if (ret_code)
return ret_code;
- hw->aq.asq.desc = hw->aq.asq_mem.va;
- hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
- ret_code = i40e_allocate_virt_mem(hw, &mem,
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
(hw->aq.num_asq_entries *
sizeof(struct i40e_asq_cmd_details)));
if (ret_code) {
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
return ret_code;
}
- hw->aq.asq.details = mem.va;
-
return ret_code;
}
@@ -96,16 +103,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
i40e_status ret_code;
- ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- return ret_code;
-
- hw->aq.arq.desc = hw->aq.arq_mem.va;
- hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
return ret_code;
}
@@ -119,14 +121,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
-
- i40e_free_dma_mem(hw, &hw->aq.asq_mem);
- hw->aq.asq_mem.va = NULL;
- hw->aq.asq_mem.pa = 0;
- mem.va = hw->aq.asq.details;
- i40e_free_virt_mem(hw, &mem);
- hw->aq.asq.details = NULL;
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
/**
@@ -138,20 +133,17 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
**/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
- i40e_free_dma_mem(hw, &hw->aq.arq_mem);
- hw->aq.arq_mem.va = NULL;
- hw->aq.arq_mem.pa = 0;
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
/**
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
struct i40e_aq_desc *desc;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
@@ -160,11 +152,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
*/
/* buffer_info structures do not need alignment */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -206,29 +198,27 @@ unwind_alloc_arq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
return ret_code;
}
/**
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
i40e_status ret_code;
- struct i40e_virt_mem mem;
struct i40e_dma_mem *bi;
int i;
/* No mapped memory needed yet, just the buffer info structures */
- ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
- sizeof(struct i40e_dma_mem)));
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
/* allocate the mapped buffers */
for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -248,35 +238,36 @@ unwind_alloc_asq_bufs:
i--;
for (; i >= 0; i--)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
return ret_code;
}
/**
* i40e_free_arq_bufs - Free receive queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
+ /* free descriptors */
for (i = 0; i < hw->aq.num_arq_entries; i++)
i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- mem.va = hw->aq.arq.r.arq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
/**
* i40e_free_asq_bufs - Free send queue buffer info elements
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
- struct i40e_virt_mem mem;
int i;
/* only unmap if the address is non-NULL */
@@ -284,63 +275,98 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
if (hw->aq.asq.r.asq_bi[i].pa)
i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- /* now free the buffer info list */
- mem.va = hw->aq.asq.r.asq_bi;
- i40e_free_virt_mem(hw, &mem);
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
/**
* i40e_config_asq_regs - configure ASQ registers
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
- wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
- wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
- wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
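
Editor's note (illustration): i40e_config_asq_regs/i40e_config_arq_regs now return a status and read one base-address register back, catching a device that is absent or mid-reset and silently dropping writes. The check reduced to its essentials (wr32/rd32 are the driver's register accessors; the helper and its parameters are illustrative):

	static i40e_status config_ring_base(struct i40e_hw *hw, u32 bal_reg,
					    u32 bah_reg, u64 pa)
	{
		wr32(hw, bah_reg, upper_32_bits(pa));
		wr32(hw, bal_reg, lower_32_bits(pa));

		/* read one register back: a mismatch means the writes
		 * never reached the function
		 */
		if (rd32(hw, bal_reg) != lower_32_bits(pa))
			return I40E_ERR_ADMIN_QUEUE_ERROR;
		return 0;
	}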
/**
* i40e_config_arq_regs - ARQ register configuration
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
- wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
- wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
- wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
* i40e_init_asq - main initialization routine for ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* This is the main initialization routine for the Admin Send Queue
* Prior to calling this function, drivers *MUST* set the following fields
@@ -383,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -397,7 +425,7 @@ init_adminq_exit:
/**
* i40e_init_arq - initialize ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main initialization routine for the Admin Receive (Event) Queue.
* Prior to calling this function, drivers *MUST* set the following fields
@@ -440,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -454,7 +484,7 @@ init_adminq_exit:
/**
* i40e_shutdown_asq - shutdown the ASQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Send Queue
**/
@@ -466,10 +496,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ATQLEN1, 0);
- else
- wr32(hw, I40E_PF_ATQLEN, 0);
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.asq_mutex);
@@ -478,8 +507,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_asq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_asq(hw);
mutex_unlock(&hw->aq.asq_mutex);
@@ -488,7 +515,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
/**
* i40e_shutdown_arq - shutdown ARQ
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* The main shutdown routine for the Admin Receive Queue
**/
@@ -500,10 +527,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
return I40E_ERR_NOT_READY;
/* Stop firmware AdminQ processing */
- if (hw->mac.type == I40E_MAC_VF)
- wr32(hw, I40E_VF_ARQLEN1, 0);
- else
- wr32(hw, I40E_PF_ARQLEN, 0);
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
/* make sure lock is available */
mutex_lock(&hw->aq.arq_mutex);
@@ -512,8 +538,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/* free ring buffers */
i40e_free_arq_bufs(hw);
- /* free the ring descriptors */
- i40e_free_adminq_arq(hw);
mutex_unlock(&hw->aq.arq_mutex);
@@ -522,7 +546,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
/**
* i40e_init_adminq - main initialization routine for Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
* in the hw->aq structure:
@@ -533,8 +557,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
**/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
- u16 eetrack_lo, eetrack_hi;
i40e_status ret_code;
+ u16 eetrack_lo, eetrack_hi;
+ int retry = 0;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -562,23 +587,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (ret_code)
goto init_adminq_free_asq;
- ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
- &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
- NULL);
- if (ret_code)
+ /* There are some cases where the firmware may not be quite ready
+ * for AdminQ operations, so we retry the AdminQ setup a few times
+ * if we see timeouts in this first AQ call.
+ */
+ do {
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver,
+ &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver,
+ &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ break;
+ retry++;
+ msleep(100);
+ i40e_resume_aq(hw);
+ } while (retry < 10);
+ if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
- ret_code = I40E_ERR_FIRMWARE_API_VERSION;
- goto init_adminq_free_arq;
- }
+ /* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+
+ /* pre-emptive resource lock release */
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
+
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
@@ -600,12 +643,15 @@ init_adminq_exit:
/**
* i40e_shutdown_adminq - shutdown routine for the Admin Queue
- * @hw: pointer to the hardware structure
+ * @hw: pointer to the hardware structure
**/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_queue_shutdown(hw, true);
+
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
@@ -616,7 +662,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
/**
* i40e_clean_asq - cleans Admin send queue
- * @asq: pointer to the adminq send ring
+ * @hw: pointer to the hardware structure
*
* returns the number of free desc
**/
@@ -637,9 +683,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
ntc++;
if (ntc == asq->count)
ntc = 0;
@@ -659,12 +704,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
-bool i40e_asq_done(struct i40e_hw *hw)
+static bool i40e_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
- return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
@@ -674,7 +719,7 @@ bool i40e_asq_done(struct i40e_hw *hw)
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
- * @opaque: pointer to info to be used in async cleanup
+ * @cmd_details: pointer to command details structure
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
@@ -699,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -826,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -854,7 +908,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
/**
@@ -912,7 +966,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
"AQRX: Event received with error 0x%X.\n",
hw->aq.arq_last_status);
} else {
- memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_size = min(datalen, e->msg_size);
if (e->msg_buf != NULL && (e->msg_size != 0))
@@ -920,11 +974,19 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
*/
bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
desc->datalen = cpu_to_le16((u16)bi->size);
desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
@@ -947,36 +1009,16 @@ clean_arq_element_out:
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
+static void i40e_resume_aq(struct i40e_hw *hw)
{
- u32 reg = 0;
-
/* Registers are reset after PF reset */
hw->aq.asq.next_to_use = 0;
hw->aq.asq.next_to_clean = 0;
i40e_config_asq_regs(hw);
- reg = hw->aq.num_asq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ATQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ATQLEN, reg);
- }
hw->aq.arq.next_to_use = 0;
hw->aq.arq.next_to_clean = 0;
i40e_config_arq_regs(hw);
- reg = hw->aq.num_arq_entries;
-
- if (hw->mac.type == I40E_MAC_VF) {
- reg |= I40E_VF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_VF_ARQLEN1, reg);
- } else {
- reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
- wr32(hw, I40E_PF_ARQLEN, reg);
- }
}
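
Editor's note (illustration): the init path above now tolerates firmware that is still settling after a reset by retrying the first AQ command. Its control flow, reduced to a skeleton (try_first_cmd is an illustrative stand-in for i40e_aq_get_firmware_version):

	int retry = 0;
	do {
		ret_code = try_first_cmd(hw);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;		/* success, or a non-timeout error */
		retry++;
		msleep(100);		/* give firmware time to settle */
		i40e_resume_aq(hw);	/* re-arm the queues after the timeout */
	} while (retry < 10);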
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 22e5ed683e4..b1552fbc48a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -32,20 +31,20 @@
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc))[i]))
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
#define I40E_ADMINQ_DESC_ALIGNMENT 4096
struct i40e_adminq_ring {
- void *desc; /* Descriptor ring memory */
- void *details; /* ASQ details */
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
union {
struct i40e_dma_mem *asq_bi;
struct i40e_dma_mem *arq_bi;
} r;
- u64 dma_addr; /* Physical address of the ring */
u16 count; /* Number of descriptors */
u16 rx_buf_len; /* Admin Receive Queue buffer length */
@@ -56,6 +55,7 @@ struct i40e_adminq_ring {
/* used for queue tracking */
u32 head;
u32 tail;
+ u32 len;
};
/* ASQ transaction details */
@@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {
};
#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
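
Editor's note (illustration): after the restructure both macros index straight into the ring's own allocations (desc_buf.va and cmd_buf.va) instead of the old loose desc/details pointers. Typical use, as in i40e_asq_send_command ('i' is an illustrative slot index):

	struct i40e_aq_desc *desc = I40E_ADMINQ_DESC(hw->aq.asq, i);
	struct i40e_asq_cmd_details *details = I40E_ADMINQ_DETAILS(hw->aq.asq, i);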
/* ARQ event information */
struct i40e_arq_event_info {
@@ -90,13 +90,11 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
- struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
- struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
-
/* last status values on send and receive queues */
enum i40e_admin_queue_err asq_last_status;
enum i40e_admin_queue_err arq_last_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index e61ebdd5a5f..15f289f2917 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -35,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -124,6 +123,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -137,10 +137,13 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
/* LAA */
- i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -180,9 +183,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +205,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -222,13 +223,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +273,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -317,13 +318,15 @@ struct i40e_aqc_get_version {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
-/* Send driver version (direct 0x0002) */
+/* Send driver version (indirect 0x0002) */
struct i40e_aqc_driver_version {
u8 driver_major_ver;
u8 driver_minor_ver;
u8 driver_build_ver;
u8 driver_subbuild_ver;
- u8 reserved[12];
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
@@ -337,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
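Editor's note (assumption): the I40E_CHECK_CMD_LENGTH invocations throughout this header are compile-time size checks -- every direct AQ command must fit the descriptor's 16-byte parameter area. The macro body is defined elsewhere in the header; a representative static-assert-style definition would be:

	/* divisor becomes 0 for a wrong-sized struct, failing the build */
	#define CHECK_STRUCT_LEN(n, X) \
		enum { check_##X = 1 / (sizeof(struct X) == (n)) }
	#define CHECK_CMD_LENGTH(X)	CHECK_STRUCT_LEN(16, X)
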
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -479,7 +490,7 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
-/* Manage MAC Address Read Command (0x0107) */
+/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
#define I40E_AQC_LAN_ADDR_VALID 0x10
@@ -517,6 +528,16 @@ struct i40e_aqc_mac_address_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -639,13 +660,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
-/* Add VSI (indirect 0x210)
+/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
*
- * Update VSI (indirect 0x211) Get VSI (indirect 0x0212)
- * use the generic i40e_aqc_switch_seid descriptor format
- * use the same completion and data structure as Add VSI
+ * Update VSI (indirect 0x211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
*/
struct i40e_aqc_add_get_update_vsi {
__le16 uplink_seid;
@@ -664,7 +687,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1026,7 +1048,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1179,33 +1203,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} v4;
struct {
u8 data[16];
- } v6;
- } ipaddr;
+ } v6;
+ } ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
-#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0007 reserved */
/* 0x0008 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
- __le32 key_low;
- __le32 key_high;
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
- u8 reserved[14];
+ u8 reserved2[14];
/* response section */
u8 allocation_result;
#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
@@ -1259,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1397,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1469,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1509,6 +1535,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1519,7 +1547,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1548,16 +1579,13 @@ struct i40e_aqc_module_desc {
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
- u8 link_speed; /* bitmap using the above enum */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
u8 abilities;
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1582,6 +1610,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
u8 link_speed;
u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
@@ -1662,6 +1694,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1713,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1745,6 +1785,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -1914,22 +1995,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
- u8 protocol_index;
-#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
-#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
- u8 reserved[12];
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
/* remove UDP Tunnel command (0x0B01) */
struct i40e_aqc_remove_udp_tunnel {
u8 reserved[2];
u8 index; /* 0 to 15 */
- u8 pf_filters;
- u8 total_filters;
- u8 reserved2[11];
+ u8 reserved2[13];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
@@ -1937,28 +2029,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
struct i40e_aqc_del_udp_tunnel_completion {
__le16 udp_port;
u8 index; /* 0 to 15 */
- u8 multiple_entries;
- u8 tunnels_used;
- u8 reserved;
- u8 tunnels_free;
- u8 reserved1[9];
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* tunnel key structure 0x0B10 */
+
struct i40e_aqc_tunnel_key_structure {
- __le16 key1_off;
- __le16 key1_len;
- __le16 key2_off;
- __le16 key2_len;
- __le16 flags;
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
/* response flags */
#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
- u8 resreved[6];
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
@@ -2052,6 +2148,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
#define I40E_AQ_CLUSTER_ID_DCB 8
#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
struct i40e_aqc_debug_dump_internals {
u8 cluster_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index 3b1cc214f9d..926811ad44a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1e4ea134975..6e65f19dd6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -43,20 +42,18 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
- case I40E_SFP_XL710_DEVICE_ID:
- case I40E_SFP_X710_DEVICE_ID:
- case I40E_QEMU_DEVICE_ID:
- case I40E_KX_A_DEVICE_ID:
- case I40E_KX_B_DEVICE_ID:
- case I40E_KX_C_DEVICE_ID:
- case I40E_KX_D_DEVICE_ID:
- case I40E_QSFP_A_DEVICE_ID:
- case I40E_QSFP_B_DEVICE_ID:
- case I40E_QSFP_C_DEVICE_ID:
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
hw->mac.type = I40E_MAC_XL710;
break;
- case I40E_VF_DEVICE_ID:
- case I40E_VF_HV_DEVICE_ID:
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
break;
default:
@@ -75,7 +72,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
- * @cap: pointer to adminq command descriptor
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
* @buffer: pointer to command buffer
*
* Dumps debug log about adminq command with descriptor contents.
@@ -126,6 +124,413 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
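[Editor's note] The decode flow described in the comment above the table maps directly onto code; a hedged sketch using only the fields named there (.known and .outer_ip) might look like the following. Any other field names are deliberately left to comments, since they are not spelled out in this hunk.

/* Illustrative sketch of consuming i40e_ptype_lookup in an Rx path. */
static void i40e_decode_ptype_sketch(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];

	if (!decoded.known) {
		/* hardware did not recognize the packet type */
		return;
	}

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
		/* IP packet: the remaining fields (IP version, tunnel type,
		 * inner protocol, payload layer) describe the header stack
		 */
	} else {
		/* non-IP packet: decode via the L2 ptype enum instead */
	}
}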
+/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
*
@@ -142,14 +547,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
i40e_status status = 0;
u32 reg;
- hw->phy.get_link_info = true;
-
- /* Determine port number */
- reg = rd32(hw, I40E_PFGEN_PORTNUM);
- reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
- I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
- hw->port = (u8)reg;
-
i40e_set_mac_type(hw);
switch (hw->mac.type) {
@@ -160,6 +557,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
break;
}
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ /* Determine the PF number based on the PCI fn */
+ reg = rd32(hw, I40E_GLPCI_CAPSUP);
+ if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK)
+ hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func);
+ else
+ hw->pf_id = (u8)hw->bus.func;
+
status = i40e_init_nvm(hw);
return status;
}
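[Editor's note] The ARI branch above reconstructs an 8-bit function number: with ARI enabled, a function at device 1, function 2 yields pf_id = (1 << 3) | 2 = 10, while without ARI the same slot simply reports pf_id = 2.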
@@ -210,8 +622,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
cmd_data->command_flags = cpu_to_le16(flags);
- memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
- memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+ cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
+ cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
+ ((u32)mac_addr[3] << 16) |
+ ((u32)mac_addr[4] << 8) |
+ mac_addr[5]);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
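[Editor's note] As a worked example of the new packing: a MAC address of 00:a0:c9:22:33:44 yields mac_sah = 0x00a0 (the first two octets) and mac_sal = 0xc9223344 (the remaining four), both then converted with cpu_to_le16/cpu_to_le32 before the descriptor is sent.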
@@ -240,32 +655,83 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
- * i40e_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
*
- * Tests a MAC address to ensure it is a valid Individual Address
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
**/
-i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
- i40e_status status = 0;
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
- /* Make sure it is not a multicast address */
- if (I40E_IS_MULTICAST(mac_addr)) {
- hw_dbg(hw, "MAC address is multicast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- } else if (I40E_IS_BROADCAST(mac_addr)) {
- hw_dbg(hw, "MAC address is broadcast\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
- hw_dbg(hw, "MAC address is all zeros\n");
- status = I40E_ERR_INVALID_MAC_ADDR;
+ if (abs_queue_idx >= 128)
+ reg_block = abs_queue_idx / 128;
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
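[Editor's note] For example, with func_caps.base_queue = 256 and queue = 44 the absolute index is 300, so the announcement lands in I40E_GLLAN_TXPRE_QDIS(2); the CLEAR_QDIS bit signals an upcoming enable and SET_QDIS an upcoming disable.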
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+ enum i40e_media_type media;
+
+ switch (hw->phy.link_info.phy_type) {
+ case I40E_PHY_TYPE_10GBASE_SR:
+ case I40E_PHY_TYPE_10GBASE_LR:
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ media = I40E_MEDIA_TYPE_FIBER;
+ break;
+ case I40E_PHY_TYPE_100BASE_TX:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_10GBASE_T:
+ media = I40E_MEDIA_TYPE_BASET;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ media = I40E_MEDIA_TYPE_DA;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ case I40E_PHY_TYPE_10GBASE_KR:
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ media = I40E_MEDIA_TYPE_BACKPLANE;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ default:
+ media = I40E_MEDIA_TYPE_UNKNOWN;
+ break;
}
- return status;
+
+ return media;
}
+#define I40E_PF_RESET_WAIT_COUNT_A0 200
+#define I40E_PF_RESET_WAIT_COUNT 100
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -275,7 +741,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)
**/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
- u32 wait_cnt = 0;
+ u32 cnt = 0;
+ u32 cnt1 = 0;
u32 reg = 0;
u32 grst_del;
@@ -285,7 +752,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
*/
grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -296,17 +763,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
return I40E_ERR_RESET_FAILED;
}
- /* Determine the PF number based on the PCI fn */
- hw->pf_id = (u8)hw->bus.func;
+ /* Now wait for the FW to be ready */
+ for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+ reg = rd32(hw, I40E_GLNVM_ULD);
+ reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+ if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+ hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
+ break;
+ }
+ usleep_range(10000, 20000);
+ }
+ if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+ I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+ hw_dbg(hw, "wait for FW Reset complete timedout\n");
+ hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
+ return I40E_ERR_RESET_FAILED;
+ }
/* If there was a Global Reset in progress when we got here,
* we don't need to do the PF Reset
*/
- if (!wait_cnt) {
+ if (!cnt) {
+ if (hw->revision_id == 0)
+ cnt = I40E_PF_RESET_WAIT_COUNT_A0;
+ else
+ cnt = I40E_PF_RESET_WAIT_COUNT;
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
- for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ for (; cnt; cnt--) {
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
@@ -319,6 +806,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
}
i40e_clear_pxe_mode(hw);
+
return 0;
}
@@ -333,12 +821,53 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
u32 reg;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
- wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+
+ if (hw->revision_id == 0) {
+ /* As a workaround, clear PXE_MODE instead of setting it */
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
+ } else {
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+ }
}
/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ */
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+ u32 gpio_val = 0;
+ u32 port;
+
+ if (!hw->func_caps.led[idx])
+ return 0;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+ I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ /* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+ * if it is not our port then ignore
+ */
+ if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+ (port != hw->port))
+ return 0;
+
+ return gpio_val;
+}
+
+#define I40E_LED0 22
+#define I40E_LINK_ACTIVITY 0xC
+
+/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
*
@@ -349,24 +878,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 gpio_val = 0;
u32 mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
-
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT;
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
}
@@ -376,57 +901,69 @@ u32 i40e_led_get(struct i40e_hw *hw)
/**
* i40e_led_set - set new on/off mode
* @hw: pointer to the hw struct
- * @mode: 0=off, else on (see EAS for mode details)
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: true if the LED should blink when on, false if steady
+ *
+ * If this function is used to turn on the blink, it should also
+ * be used to disable the blink when restoring the original state.
**/
-void i40e_led_set(struct i40e_hw *hw, u32 mode)
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 gpio_val = 0;
- u32 led_mode = 0;
- u32 port;
int i;
- for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
- if (!hw->func_caps.led[i])
- continue;
+ if (mode & 0xfffffff0)
+ hw_dbg(hw, "invalid mode passed in %X\n", mode);
- gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
- port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
- >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+ /* as per the documentation GPIO 22-29 are the LED
+ * GPIO pins named LED0..LED7
+ */
+ for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+ u32 gpio_val = i40e_led_is_mine(hw, i);
- if (port != hw->port)
+ if (!gpio_val)
continue;
- led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
- I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
- gpio_val |= led_mode;
+ /* this & is a bit of paranoia, but serves as a range check */
+ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+ if (mode == I40E_LINK_ACTIVITY)
+ blink = false;
+
+ gpio_val |= (blink ? 1 : 0) <<
+ I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT;
+
wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ break;
}
}
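[Editor's note] A hedged usage sketch of the new blink parameter and the restore contract noted in the kernel-doc above; the identify flow itself is hypothetical, not part of this patch.

/* Illustrative sketch: blink the port LEDs, then restore them. */
static void i40e_identify_port_sketch(struct i40e_hw *hw)
{
	u32 orig_mode = i40e_led_get(hw);	/* remember current mode */

	i40e_led_set(hw, 0xf, true);		/* LEDs on, blinking */

	/* ... wait for the identify period to elapse ... */

	i40e_led_set(hw, orig_mode, false);	/* restore, blink off */
}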
/* Admin command wrappers */
+
/**
- * i40e_aq_queue_shutdown
+ * i40e_aq_clear_pxe_mode
* @hw: pointer to the hw struct
- * @unloading: is the driver unloading itself
+ * @cmd_details: pointer to command details structure or NULL
*
- * Tell the Firmware that we're shutting down the AdminQ and whether
- * or not the driver is unloading as well.
+ * Tell the firmware that the driver is taking over from PXE
**/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- struct i40e_aqc_queue_shutdown *cmd =
- (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
+ i40e_aqc_opc_clear_pxe_mode);
- if (unloading)
- cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
return status;
}
@@ -490,15 +1027,23 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
goto aq_get_link_info_exit;
/* save off old link status information */
- memcpy(&hw->phy.link_info_old, hw_link_info,
- sizeof(struct i40e_link_status));
+ hw->phy.link_info_old = *hw_link_info;
/* update link status */
hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw->phy.media_type = i40e_get_media_type(hw);
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
+ hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -519,7 +1064,7 @@ aq_get_link_info_exit:
/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Add a VSI context to the hardware.
@@ -545,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -571,7 +1114,8 @@ aq_add_vsi_exit:
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -665,7 +1209,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
/**
* i40e_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
@@ -673,8 +1217,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
@@ -683,11 +1227,9 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -707,7 +1249,7 @@ aq_get_vsi_params_exit:
/**
* i40e_aq_update_vsi_params
* @hw: pointer to the hw struct
- * @vsi: pointer to a vsi context struct
+ * @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
*
* Update a VSI context.
@@ -717,17 +1259,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_switch_seid *cmd =
- (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
- cmd->seid = cpu_to_le16(vsi_ctx->seid);
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -810,7 +1350,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
/**
* i40e_aq_send_driver_version
* @hw: pointer to the hw struct
- * @event: driver event: driver ok, start or stop
* @dv: driver's major, minor version
* @cmd_details: pointer to command details structure or NULL
*
@@ -824,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
i40e_status status;
+ u16 len;
if (dv == NULL)
return I40E_ERR_PARAM;
@@ -835,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
cmd->driver_subbuild_ver = dv->subbuild_version;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
return status;
}
@@ -873,6 +1420,7 @@ i40e_get_link_status_exit:
* @downlink_seid: the VSI SEID
* @enabled_tc: bitmap of TCs to be enabled
* @default_port: true for default port VSI, false for control port
+ * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
* @veb_seid: pointer to where to put the resulting VEB SEID
* @cmd_details: pointer to command details structure or NULL
*
@@ -881,7 +1429,8 @@ i40e_get_link_status_exit:
**/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *veb_seid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -907,6 +1456,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
else
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+ if (enable_l2_filtering)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
cmd->veb_flags = cpu_to_le16(veb_flags);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -922,10 +1475,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* @hw: pointer to the hw struct
* @veb_seid: the SEID of the VEB to query
* @switch_id: the uplink switch id
- * @floating_veb: set to true if the VEB is floating
+ * @floating: set to true if the VEB is floating
* @statistic_index: index of the stats counter block for this VEB
* @vebs_used: number of VEB's used by function
- * @vebs_unallocated: total VEB's not reserved by any function
+ * @vebs_free: total VEB's not reserved by any function
* @cmd_details: pointer to command details structure or NULL
*
* This retrieves the parameters for a particular VEB, specified by
@@ -1059,89 +1612,11 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
}
/**
- * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of vlan filters to be added
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
- * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
- * @hw: pointer to the hw struct
- * @seid: VSI for the vlan filters
- * @v_list: list of macvlans to be removed
- * @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
- **/
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
- u16 buf_size;
-
- if (count == 0 || !v_list || !hw)
- return I40E_ERR_PARAM;
-
- buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
-
- /* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
- cmd->num_addresses = cpu_to_le16(count);
- cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
- cmd->seid[1] = 0;
- cmd->seid[2] = 0;
-
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
-
- status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
- cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
* @msg: pointer to the msg buffer
* @msglen: msg length
* @cmd_details: pointer to command details
@@ -1371,9 +1846,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
- p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ p = &hw->dev_caps;
else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
- p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ p = &hw->func_caps;
else
return;
@@ -1496,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
}
+ /* Software override ensuring FCoE is disabled if npar or mfp
+ * mode is enabled, because FCoE is not supported in those modes.
+ */
+ if (p->npar_enable || p->mfp_mode_1)
+ p->fcoe = false;
+
/* additional HW specific goodies that might
* someday be HW version specific
*/
@@ -1519,8 +2000,8 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
- i40e_status status = 0;
struct i40e_aq_desc desc;
+ i40e_status status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -1681,6 +2162,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @header_len: length of the tunneling header in DWords
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_udp_tunnel *cmd =
+ (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ struct i40e_aqc_del_udp_tunnel_completion *resp =
+ (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+ cmd->udp_port = cpu_to_le16(udp_port);
+ cmd->protocol_type = protocol_index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status)
+ *filter_index = resp->index;
+
+ return status;
+}
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_remove_udp_tunnel *cmd =
+ (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+ cmd->index = index;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
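[Editor's note] A hedged usage sketch of the pair of wrappers above, registering the IANA-assigned VXLAN port (4789) and tearing it down again via the filter index returned in the completion.

/* Illustrative sketch only, not part of this patch. */
static i40e_status i40e_vxlan_port_sketch(struct i40e_hw *hw)
{
	u8 filter_index;
	i40e_status ret;

	ret = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
				     &filter_index, NULL);
	if (ret)
		return ret;

	/* ... tunnel traffic flows while the filter is installed ... */

	return i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
}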
+/**
* i40e_aq_delete_element - Delete switch element
* @hw: pointer to the hw struct
* @seid: the SEID to delete from the switch
@@ -1709,6 +2247,28 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
* @hw: pointer to the hw struct
* @seid: seid for the physical port/switching component/vsi
@@ -1770,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
* @seid: VSI seid
@@ -1787,6 +2376,40 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
}
/**
+ * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component connected to Physical Port
+ * @ets_data: Buffer holding ETS parameters
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ sizeof(*ets_data), opcode, cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
* i40e_aq_query_vsi_bw_config - Query VSI BW configuration
* @hw: pointer to the hw struct
* @seid: seid of the VSI
@@ -1888,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 pe_cntx_size, pe_filt_size;
- u32 fcoe_fmax, pe_fmax;
+ u32 fcoe_fmax;
u32 val;
/* Validate FCoE settings passed */
@@ -1963,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return I40E_ERR_INVALID_SIZE;
- /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
- val = rd32(hw, I40E_GLHMC_PEXFMAX);
- pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
- >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
- if (pe_filt_size + pe_cntx_size > pe_fmax)
- return I40E_ERR_INVALID_SIZE;
-
return 0;
}
@@ -2039,3 +2655,110 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return 0;
}
+
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: Add control packet filter if True else remove
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command will add or remove a control packet filter for a control VSI.
+ * In return it will update the total number of perfect filters in
+ * the stats member.
+ **/
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_control_packet_filter *cmd =
+ (struct i40e_aqc_add_remove_control_packet_filter *)
+ &desc.params.raw;
+ struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ (struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (vsi_seid == 0)
+ return I40E_ERR_PARAM;
+
+ if (is_add) {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_control_packet_filter);
+ cmd->queue = cpu_to_le16(queue);
+ } else {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_control_packet_filter);
+ }
+
+ if (mac_addr)
+ memcpy(cmd->mac, mac_addr, ETH_ALEN);
+
+ cmd->etype = cpu_to_le16(ethtype);
+ cmd->flags = cpu_to_le16(flags);
+ cmd->seid = cpu_to_le16(vsi_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && stats) {
+ stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used);
+ stats->etype_used = le16_to_cpu(resp->etype_used);
+ stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free);
+ stats->etype_free = le16_to_cpu(resp->etype_free);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+ hw->bus.type = i40e_bus_type_pci_express;
+
+ switch (link_status & PCI_EXP_LNKSTA_NLW) {
+ case PCI_EXP_LNKSTA_NLW_X1:
+ hw->bus.width = i40e_bus_width_pcie_x1;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X2:
+ hw->bus.width = i40e_bus_width_pcie_x2;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X4:
+ hw->bus.width = i40e_bus_width_pcie_x4;
+ break;
+ case PCI_EXP_LNKSTA_NLW_X8:
+ hw->bus.width = i40e_bus_width_pcie_x8;
+ break;
+ default:
+ hw->bus.width = i40e_bus_width_unknown;
+ break;
+ }
+
+ switch (link_status & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ hw->bus.speed = i40e_bus_speed_2500;
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ hw->bus.speed = i40e_bus_speed_5000;
+ break;
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ hw->bus.speed = i40e_bus_speed_8000;
+ break;
+ default:
+ hw->bus.speed = i40e_bus_speed_unknown;
+ break;
+ }
+}
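[Editor's note] A hedged sketch of the expected caller: read the link status word from PCIe config space (pcie_capability_read_word and PCI_EXP_LNKSTA come from linux/pci.h) and hand it to the helper above. The actual call site is not part of this hunk.

/* Illustrative sketch: record bus speed/width during probe. */
static void i40e_record_bus_info_sketch(struct i40e_hw *hw,
					struct pci_dev *pdev)
{
	u16 link_status;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status);
	i40e_set_pci_config_data(hw, link_status);
}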
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
new file mode 100644
index 00000000000..036570d7617
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -0,0 +1,472 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_dcb.h"
+
+/**
+ * i40e_get_dcbx_status
+ * @hw: pointer to the hw struct
+ * @status: Embedded DCBX Engine Status
+ *
+ * Get the DCBX status from the Firmware
+ **/
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+{
+ u32 reg;
+
+ if (!status)
+ return I40E_ERR_PARAM;
+
+ reg = rd32(hw, I40E_PRTDCB_GENS);
+ *status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
+ I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT);
+
+ return 0;
+}
+
+/**
+ * i40e_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_ieee_ets_config *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >>
+ I40E_IEEE_ETS_WILLING_SHIFT);
+ etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >>
+ I40E_IEEE_ETS_CBS_SHIFT);
+ etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >>
+ I40E_IEEE_ETS_MAXTC_SHIFT);
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ etscfg->prioritytable[i * 2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ etscfg->prioritytable[i * 2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ etscfg->tsatable[i] = buf[offset++];
+}
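
[Editor's note] As an illustration of the bit layouts sketched in the comments above, a small standalone program (not part of the driver) decoding a hypothetical first post-subtype octet and one priority-assignment octet the same way i40e_parse_ieee_etscfg_tlv does.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical first post-subtype octet: willing = 1, CBS = 0, max TCs = 4 */
            uint8_t octet = (1u << 7) | 4u;
            unsigned int willing = (octet >> 7) & 0x1;
            unsigned int cbs     = (octet >> 6) & 0x1;
            unsigned int maxtc   = octet & 0x7;

            /* One priority-assignment octet: the upper nibble maps priority 2i,
             * the lower nibble priority 2i + 1 (3 significant bits each).
             * 0x21: even priority -> TC2, odd priority -> TC1.
             */
            uint8_t prio_octet = 0x21;
            unsigned int tc_even = (prio_octet >> 4) & 0x7;
            unsigned int tc_odd  = prio_octet & 0x7;

            printf("willing=%u cbs=%u maxtc=%u\n", willing, cbs, maxtc);
            printf("even priority -> TC%u, odd priority -> TC%u\n", tc_even, tc_odd);
            return 0;
    }
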
+
+/**
+ * i40e_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ u8 priority;
+ int i;
+
+ /* Move offset to priority table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >>
+ I40E_IEEE_ETS_PRIO_1_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2] = priority;
+ priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >>
+ I40E_IEEE_ETS_PRIO_0_SHIFT);
+ dcbcfg->etsrec.prioritytable[i*2 + 1] = priority;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tcbwtable[i] = buf[offset++];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ dcbcfg->etsrec.tsatable[i] = buf[offset++];
+}
+
+/**
+ * i40e_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >>
+ I40E_IEEE_PFC_WILLING_SHIFT);
+ dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >>
+ I40E_IEEE_PFC_MBC_SHIFT);
+ dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >>
+ I40E_IEEE_PFC_CAP_SHIFT);
+ dcbcfg->pfc.pfcenable = buf[1];
+}
+
+/**
+ * i40e_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ **/
+static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength;
+ u16 offset = 0;
+ u16 length;
+ int i = 0;
+ u8 *buf;
+
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ buf = tlv->tlvinfo;
+
+ /* The App priority table starts 5 octets after TLV header */
+ length -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < length) {
+ dcbcfg->app[i].priority = (u8)((buf[offset] &
+ I40E_IEEE_APP_PRIO_MASK) >>
+ I40E_IEEE_APP_PRIO_SHIFT);
+ dcbcfg->app[i].selector = (u8)((buf[offset] &
+ I40E_IEEE_APP_SEL_MASK) >>
+ I40E_IEEE_APP_SEL_SHIFT);
+ dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
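
[Editor's note] A standalone sketch of the 3-octet APP entry layout parsed above; the sample entry (selector 1 for Ethertype, protocol 0x8906) is hypothetical and only illustrates the field packing.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical 3-octet APP entry: priority 3 in bits 7-5, selector 1
             * (Ethertype) in bits 2-0, then the 16-bit protocol ID big-endian.
             */
            uint8_t entry[3] = { (3u << 5) | 1u, 0x89, 0x06 };

            unsigned int priority = (entry[0] >> 5) & 0x7;
            unsigned int selector = entry[0] & 0x7;
            unsigned int protocol = ((unsigned int)entry[1] << 8) | entry[2];

            printf("priority=%u selector=%u protocol=0x%04x\n",
                   priority, selector, protocol);
            return 0;
    }
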
+
+/**
+ * i40e_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ **/
+static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+ I40E_LLDP_TLV_SUBTYPE_SHIFT);
+ switch (subtype) {
+ case I40E_IEEE_SUBTYPE_ETS_CFG:
+ i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_ETS_REC:
+ i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_PFC_CFG:
+ i40e_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_SUBTYPE_APP_PRI:
+ i40e_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently only the IEEE 802.1Qaz TLV is supported; all other
+ * organization-specific TLVs are ignored
+ **/
+static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >>
+ I40E_LLDP_TLV_OUI_SHIFT);
+ switch (oui) {
+ case I40E_IEEE_8021QAZ_OUI:
+ i40e_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_lldp_to_dcb_config
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ **/
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_lldp_org_tlv *tlv;
+ u16 type;
+ u16 length;
+ u16 typelength;
+ u16 offset = 0;
+
+ if (!lldpmib || !dcbcfg)
+ return I40E_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelength = ntohs(tlv->typelength);
+ type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+ I40E_LLDP_TLV_TYPE_SHIFT);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ offset += sizeof(typelength) + length;
+
+ /* END TLV or beyond LLDPDU size */
+ if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+ break;
+
+ switch (type) {
+ case I40E_TLV_TYPE_ORG:
+ i40e_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) +
+ length);
+ }
+
+ return ret;
+}
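
[Editor's note] The TLV walk above relies on the 16-bit typelength field packing a 7-bit type (bits 15-9) and a 9-bit length (bits 8-0). A minimal standalone sketch of the same walk over a hypothetical, truncated LLDPDU fragment:

    #include <stdio.h>
    #include <stdint.h>

    /* 16-bit TLV header: 7-bit type in bits 15-9, 9-bit length in bits 8-0 */
    static unsigned int tlv_type(uint16_t tl)   { return tl >> 9; }
    static unsigned int tlv_length(uint16_t tl) { return tl & 0x1ff; }

    int main(void)
    {
            /* Hypothetical truncated LLDPDU fragment (after the Ethernet header):
             * one org-specific TLV (type 127, length 5) followed by the End TLV.
             */
            uint8_t pdu[] = { 0xfe, 0x05, 0x00, 0x80, 0xc2, 0x09, 0x00,
                              0x00, 0x00 };
            size_t off = 0;

            while (off + 2 <= sizeof(pdu)) {
                    uint16_t tl = (uint16_t)((pdu[off] << 8) | pdu[off + 1]);

                    if (tlv_type(tl) == 0)          /* End of LLDPDU */
                            break;
                    printf("TLV type=%u length=%u\n", tlv_type(tl), tlv_length(tl));
                    off += 2 + tlv_length(tl);      /* skip header and value */
            }
            return 0;
    }
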
+
+/**
+ * i40e_aq_get_dcb_config
+ * @hw: pointer to the hw struct
+ * @mib_type: mib type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the Firmware
+ **/
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ i40e_status ret = 0;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type,
+ (void *)lldpmib, I40E_LLDPDU_SIZE,
+ NULL, NULL, NULL);
+ if (ret)
+ goto free_mem;
+
+ /* Parse LLDP MIB to get dcb configuration */
+ ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg);
+
+free_mem:
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_get_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get DCB configuration from the Firmware
+ **/
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ /* Get Local DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+ &hw->local_dcbx_config);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+out:
+ return ret;
+}
+
+/**
+ * i40e_init_dcb
+ * @hw: pointer to the hw struct
+ *
+ * Update DCB configuration from the Firmware
+ **/
+i40e_status i40e_init_dcb(struct i40e_hw *hw)
+{
+ i40e_status ret = 0;
+
+ if (!hw->func_caps.dcb)
+ return ret;
+
+ /* Get DCBX status */
+ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
+ if (ret)
+ return ret;
+
+ /* Check the DCBX Status */
+ switch (hw->dcbx_status) {
+ case I40E_DCBX_STATUS_DONE:
+ case I40E_DCBX_STATUS_IN_PROGRESS:
+ /* Get current DCBX configuration */
+ ret = i40e_get_dcb_config(hw);
+ break;
+ case I40E_DCBX_STATUS_DISABLED:
+ return ret;
+ case I40E_DCBX_STATUS_NOT_STARTED:
+ case I40E_DCBX_STATUS_MULTIPLE_PEERS:
+ default:
+ break;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
new file mode 100644
index 00000000000..34cf1c30c7f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DCB_H_
+#define _I40E_DCB_H_
+
+#include "i40e_type.h"
+
+#define I40E_DCBX_STATUS_NOT_STARTED 0
+#define I40E_DCBX_STATUS_IN_PROGRESS 1
+#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
+#define I40E_DCBX_STATUS_DISABLED 7
+
+#define I40E_TLV_TYPE_END 0
+#define I40E_TLV_TYPE_ORG 127
+
+#define I40E_IEEE_8021QAZ_OUI 0x0080C2
+#define I40E_IEEE_SUBTYPE_ETS_CFG 9
+#define I40E_IEEE_SUBTYPE_ETS_REC 10
+#define I40E_IEEE_SUBTYPE_PFC_CFG 11
+#define I40E_IEEE_SUBTYPE_APP_PRI 12
+
+/* Defines for LLDP TLV header */
+#define I40E_LLDP_TLV_LEN_SHIFT 0
+#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
+#define I40E_LLDP_TLV_TYPE_SHIFT 9
+#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
+#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
+#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
+#define I40E_LLDP_TLV_OUI_SHIFT 8
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+
+/* Defines for IEEE ETS TLV */
+#define I40E_IEEE_ETS_MAXTC_SHIFT 0
+#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
+#define I40E_IEEE_ETS_CBS_SHIFT 6
+#define I40E_IEEE_ETS_CBS_MASK (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_WILLING_SHIFT 7
+#define I40E_IEEE_ETS_WILLING_MASK (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
+#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
+#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
+#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
+
+/* Defines for IEEE TSA types */
+#define I40E_IEEE_TSA_STRICT 0
+#define I40E_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define I40E_IEEE_PFC_CAP_SHIFT 0
+#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
+#define I40E_IEEE_PFC_MBC_SHIFT 6
+#define I40E_IEEE_PFC_MBC_MASK (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_WILLING_SHIFT 7
+#define I40E_IEEE_PFC_WILLING_MASK (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+
+/* Defines for IEEE APP TLV */
+#define I40E_IEEE_APP_SEL_SHIFT 0
+#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
+#define I40E_IEEE_APP_PRIO_SHIFT 5
+#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+
+
+#pragma pack(1)
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct i40e_lldp_org_tlv {
+ __be16 typelength;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw);
+#endif /* _I40E_DCB_H_ */
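
[Editor's note] The header wraps the TLV struct in #pragma pack(1) so it overlays the wire format without padding. A standalone check, using local stand-ins for the kernel types:

    #include <stdio.h>
    #include <stdint.h>

    #pragma pack(1)
    struct lldp_org_tlv {               /* local stand-in for i40e_lldp_org_tlv */
            uint16_t typelength;        /* big-endian on the wire */
            uint32_t ouisubtype;        /* 24-bit OUI plus 8-bit subtype */
            uint8_t  tlvinfo[1];        /* start of the variable-length value */
    };
    #pragma pack()

    int main(void)
    {
            /* Without pack(1) the 32-bit member would be aligned to a 4-byte
             * offset and the struct would no longer overlay the wire format.
             */
            printf("sizeof(struct lldp_org_tlv) = %zu (expect 7)\n",
                   sizeof(struct lldp_org_tlv));
            return 0;
    }
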
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
new file mode 100644
index 00000000000..00bc0cdb3a0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -0,0 +1,316 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+ u32 val;
+
+ val = rd32(hw, I40E_PRTDCB_GENC);
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+ I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @dev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ ets->willing = dcbxcfg->etscfg.willing;
+ ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->cbs = dcbxcfg->etscfg.cbs;
+ memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+ sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+ sizeof(ets->prio_tc));
+ memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
+ sizeof(ets->tc_reco_bw));
+ memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+ sizeof(ets->tc_reco_tsa));
+ memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prioritytable,
+ sizeof(ets->reco_prio_tc));
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @dev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+ pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+ pfc->mbc = dcbxcfg->pfc.mbc;
+ i40e_get_pfc_delay(hw, &pfc->delay);
+
+ /* Get Requests/Indications */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+ pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @dev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @dev: the corresponding netdev
+ * @perm_addr: buffer to return the permanent MAC address
+ *
+ * Returns the LAN and SAN MAC addresses used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+ u8 *perm_addr)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ int i, j;
+
+ memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+ for (i = 0; i < dev->addr_len; i++)
+ perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+ for (j = 0; j < dev->addr_len; j++, i++)
+ perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = i40e_dcbnl_ieee_getets,
+ .ieee_getpfc = i40e_dcbnl_ieee_getpfc,
+ .getdcbx = i40e_dcbnl_getdcbx,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+};
+
+/**
+ * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config
+ * @vsi: the corresponding vsi
+ *
+ * Set up all the IEEE APPs in the DCBNL APP table and generate an
+ * event to notify user space of the other settings
+ **/
+void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ struct dcb_app sapp;
+ u8 prio, tc_map;
+ int i;
+
+ /* DCB not enabled */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ dcbxcfg = &hw->local_dcbx_config;
+
+ /* Set up all the App TLVs if DCBx is negotiated */
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ prio = dcbxcfg->app[i].priority;
+ tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+
+ /* Add APP only if the TC is enabled for this VSI */
+ if (tc_map & vsi->tc_config.enabled_tc) {
+ sapp.selector = dcbxcfg->app[i].selector;
+ sapp.protocol = dcbxcfg->app[i].protocolid;
+ sapp.priority = prio;
+ dcb_ieee_setapp(dev, &sapp);
+ }
+ }
+
+ /* Notify user-space of the changes */
+ dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
+}
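
[Editor's note] The per-APP filtering above maps an APP's priority to a TC bitmap (1 << prioritytable[prio]) and only registers the APP when that TC is enabled on the VSI. A standalone sketch with a hypothetical priority table and enabled-TC bitmap:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical ETS priority-to-TC table and enabled-TC bitmap */
            unsigned int prioritytable[8] = { 0, 0, 0, 1, 2, 0, 0, 0 };
            unsigned int enabled_tc = 0x3;          /* TC0 and TC1 enabled */
            unsigned int prio;

            for (prio = 0; prio < 8; prio++) {
                    unsigned int tc_map = 1u << prioritytable[prio];

                    if (tc_map & enabled_tc)
                            printf("priority %u -> TC%u (APP would be added)\n",
                                   prio, prioritytable[prio]);
                    else
                            printf("priority %u -> TC%u (skipped)\n",
                                   prio, prioritytable[prio]);
            }
            return 0;
    }
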
+
+/**
+ * i40e_dcbnl_vsi_del_app - Delete APP for given VSI
+ * @vsi: the corresponding vsi
+ * @app: APP to delete
+ *
+ * Delete given APP from the DCBNL APP table for given
+ * VSI
+ **/
+static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
+ struct i40e_ieee_app_priority_table *app)
+{
+ struct net_device *dev = vsi->netdev;
+ struct dcb_app sapp;
+
+ if (!dev)
+ return -EINVAL;
+
+ sapp.selector = app->selector;
+ sapp.protocol = app->protocolid;
+ sapp.priority = app->priority;
+ return dcb_ieee_delapp(dev, &sapp);
+}
+
+/**
+ * i40e_dcbnl_del_app - Delete APP on all VSIs
+ * @pf: the corresponding pf
+ * @app: APP to delete
+ *
+ * Delete given APP from all the VSIs for given PF
+ **/
+static void i40e_dcbnl_del_app(struct i40e_pf *pf,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int v, err;
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v] && pf->vsi[v]->netdev) {
+ err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ if (err)
+ dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+ __func__, pf->vsi[v]->seid,
+ err, app->selector,
+ app->protocolid, app->priority);
+ }
+ }
+}
+
+/**
+ * i40e_dcbnl_find_app - Search APP in given DCB config
+ * @cfg: DCBX configuration data
+ * @app: APP to search for
+ *
+ * Find given APP in the DCB configuration
+ **/
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_ieee_app_priority_table *app)
+{
+ int i;
+
+ for (i = 0; i < cfg->numapps; i++) {
+ if (app->selector == cfg->app[i].selector &&
+ app->protocolid == cfg->app[i].protocolid &&
+ app->priority == cfg->app[i].priority)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_dcbnl_flush_apps - Delete all removed APPs
+ * @pf: the corresponding pf
+ * @new_cfg: new DCBX configuration data
+ *
+ * Find and delete all APPs that are not present in the passed
+ * DCB configuration
+ **/
+void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_ieee_app_priority_table app;
+ struct i40e_dcbx_config *dcbxcfg;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ dcbxcfg = &hw->local_dcbx_config;
+ for (i = 0; i < dcbxcfg->numapps; i++) {
+ app = dcbxcfg->app[i];
+ /* The APP is not available anymore; delete it */
+ if (!i40e_dcbnl_find_app(new_cfg, &app))
+ i40e_dcbnl_del_app(pf, &app);
+ }
+}
+
+/**
+ * i40e_dcbnl_setup - DCBNL setup
+ * @vsi: the corresponding vsi
+ *
+ * Set up DCBNL ops and initial APP TLVs
+ **/
+void i40e_dcbnl_setup(struct i40e_vsi *vsi)
+{
+ struct net_device *dev = vsi->netdev;
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ /* Not DCB capable */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return;
+
+ /* Do not setup DCB NL ops for MFP mode */
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ dev->dcbnl_ops = &dcbnl_ops;
+
+ /* Set initial IEEE DCB settings */
+ i40e_dcbnl_set_all(vsi);
+}
+#endif /* CONFIG_I40E_DCB */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8dbd91f64b7..cffdfc21290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -46,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
if (seid < 0)
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
else
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
return pf->vsi[i];
@@ -151,9 +150,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
- char dump_request_buf[16];
bool seid_found = false;
- int bytes_not_copied;
long seid = -1;
int buflen = 0;
int i, ret;
@@ -163,21 +160,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
/* don't allow partial writes */
if (*ppos != 0)
return 0;
- if (count >= sizeof(dump_request_buf))
- return -ENOSPC;
-
- bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- if (bytes_not_copied > 0)
- count -= bytes_not_copied;
- dump_request_buf[count] = '\0';
/* decode the SEID given to be dumped */
- ret = kstrtol(dump_request_buf, 0, &seid);
- if (ret < 0) {
- dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
- dump_request_buf);
+ ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "bad seid value\n");
} else if (seid == 0) {
seid_found = true;
@@ -203,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_asq_entries);
- memcpy(p, pf->hw.aq.asq.desc, len);
+ memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
p += len;
len = (sizeof(struct i40e_aq_desc)
* pf->hw.aq.num_arq_entries);
- memcpy(p, pf->hw.aq.arq.desc, len);
+ memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
p += len;
i40e_dbg_dump_data_len = buflen;
@@ -245,26 +233,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
memcpy(p, vsi, len);
p += len;
- len = (sizeof(struct i40e_q_vector)
- * vsi->num_q_vectors);
- memcpy(p, vsi->q_vectors, len);
- p += len;
-
- len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
- memcpy(p, vsi->tx_rings, len);
- p += len;
- memcpy(p, vsi->rx_rings, len);
- p += len;
+ if (vsi->num_q_vectors) {
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+ }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- len = sizeof(struct i40e_tx_buffer);
- memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ if (vsi->num_queue_pairs) {
+ len = (sizeof(struct i40e_ring) *
+ vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
p += len;
}
- for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+ if (vsi->tx_rings[0]) {
+ len = sizeof(struct i40e_tx_buffer);
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+ p += len;
+ }
len = sizeof(struct i40e_rx_buffer);
- memcpy(p, vsi->rx_rings[i].rx_bi, len);
- p += len;
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+ p += len;
+ }
}
/* macvlan filter list */
@@ -366,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
}
/**
- * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum
+ * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
* @seid: the seid the user put in
**/
@@ -484,100 +479,105 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
- if (vsi->rx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, vsi->rx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, vsi->rx_rings[i].dev,
- vsi->rx_rings[i].netdev,
- vsi->rx_rings[i].rx_bi);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->rx_rings[i].state,
- vsi->rx_rings[i].queue_index,
- vsi->rx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, vsi->rx_rings[i].rx_hdr_len,
- vsi->rx_rings[i].rx_buf_len,
- vsi->rx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->rx_rings[i].hsplit,
- vsi->rx_rings[i].next_to_use,
- vsi->rx_rings[i].next_to_clean,
- vsi->rx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
- i, vsi->rx_rings[i].rx_stats.packets,
- vsi->rx_rings[i].rx_stats.bytes,
- vsi->rx_rings[i].rx_stats.non_eop_descs);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
- i,
- vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
- vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->rx_rings[i].size,
- (long unsigned int)vsi->rx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->rx_rings[i].vsi,
- vsi->rx_rings[i].q_vector);
- }
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ if (!rx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, rx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, rx_ring->dev,
+ rx_ring->netdev,
+ rx_ring->rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, rx_ring->state,
+ rx_ring->queue_index,
+ rx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, rx_ring->rx_hdr_len,
+ rx_ring->rx_buf_len,
+ rx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, rx_ring->hsplit,
+ rx_ring->next_to_use,
+ rx_ring->next_to_clean,
+ rx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, rx_ring->stats.packets,
+ rx_ring->stats.bytes,
+ rx_ring->rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
+ i,
+ rx_ring->rx_stats.alloc_page_failed,
+ rx_ring->rx_stats.alloc_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, rx_ring->size,
+ (long unsigned int)rx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, rx_ring->vsi,
+ rx_ring->q_vector);
}
- if (vsi->tx_rings) {
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, vsi->tx_rings[i].desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, vsi->tx_rings[i].dev,
- vsi->tx_rings[i].netdev,
- vsi->tx_rings[i].tx_bi);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, vsi->tx_rings[i].state,
- vsi->tx_rings[i].queue_index,
- vsi->tx_rings[i].reg_idx);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, vsi->tx_rings[i].dtype);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, vsi->tx_rings[i].hsplit,
- vsi->tx_rings[i].next_to_use,
- vsi->tx_rings[i].next_to_clean,
- vsi->tx_rings[i].ring_active);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
- i, vsi->tx_rings[i].tx_stats.packets,
- vsi->tx_rings[i].tx_stats.bytes,
- vsi->tx_rings[i].tx_stats.restart_queue);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
- i,
- vsi->tx_rings[i].tx_stats.tx_busy,
- vsi->tx_rings[i].tx_stats.completed,
- vsi->tx_rings[i].tx_stats.tx_done_old);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, vsi->tx_rings[i].size,
- (long unsigned int)vsi->tx_rings[i].dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, vsi->tx_rings[i].vsi,
- vsi->tx_rings[i].q_vector);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: DCB tc = %d\n",
- i, vsi->tx_rings[i].dcb_tc);
- }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
+
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, tx_ring->desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, tx_ring->dev,
+ tx_ring->netdev,
+ tx_ring->tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, tx_ring->state,
+ tx_ring->queue_index,
+ tx_ring->reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, tx_ring->dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, tx_ring->hsplit,
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, tx_ring->stats.packets,
+ tx_ring->stats.bytes,
+ tx_ring->tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ i,
+ tx_ring->tx_stats.tx_busy,
+ tx_ring->tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, tx_ring->size,
+ (long unsigned int)tx_ring->dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, tx_ring->vsi,
+ tx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, tx_ring->dcb_tc);
}
+ rcu_read_unlock();
dev_info(&pf->pdev->dev,
" work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
- if (vsi->q_vectors) {
- for (i = 0; i < vsi->num_q_vectors; i++) {
- dev_info(&pf->pdev->dev,
- " q_vectors[%i]: base index = %ld\n",
- i, ((long int)*vsi->q_vectors[i].rx.ring-
- (long int)*vsi->q_vectors[0].rx.ring)/
- sizeof(struct i40e_ring));
- }
- }
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -716,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
{
struct i40e_adminq_ring *ring;
struct i40e_hw *hw = &pf->hw;
+ char hdr[32];
int i;
+ snprintf(hdr, sizeof(hdr), "%s %s: ",
+ dev_driver_string(&pf->pdev->dev),
+ dev_name(&pf->pdev->dev));
+
/* first the send (command) ring, then the receive (event) ring */
dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
ring = &(hw->aq.asq);
@@ -727,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
@@ -745,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
d->cookie_high, d->cookie_low);
- dev_info(&pf->pdev->dev,
- " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- d->params.raw[0], d->params.raw[1], d->params.raw[2],
- d->params.raw[3], d->params.raw[4], d->params.raw[5],
- d->params.raw[6], d->params.raw[7], d->params.raw[8],
- d->params.raw[9], d->params.raw[10], d->params.raw[11],
- d->params.raw[12], d->params.raw[13],
- d->params.raw[14], d->params.raw[15]);
+ print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
+ 16, 1, d->params.raw, 16, 0);
}
}
@@ -768,53 +752,56 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
struct i40e_pf *pf, bool is_rx_ring)
{
- union i40e_rx_desc *ds;
+ struct i40e_tx_desc *txd;
+ union i40e_rx_desc *rxd;
struct i40e_ring ring;
struct i40e_vsi *vsi;
int i;
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
- dev_info(&pf->pdev->dev,
- "vsi %d not found\n", vsi_seid);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
+ dev_info(&pf->pdev->dev,
+ "descriptor rings have not been allocated for vsi %d\n",
+ vsi_seid);
return;
}
if (is_rx_ring)
- ring = vsi->rx_rings[ring_id];
+ ring = *vsi->rx_rings[ring_id];
else
- ring = vsi->tx_rings[ring_id];
+ ring = *vsi->tx_rings[ring_id];
if (cnt == 2) {
dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
for (i = 0; i < ring.count; i++) {
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, i);
- else
- ds = (union i40e_rx_desc *)
- I40E_TX_DESC(&ring, i);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx\n", i,
- ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, txd->buffer_addr,
+ txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, i);
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n",
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, i);
dev_info(&pf->pdev->dev,
" d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- i, ds->read.pkt_addr,
- ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ i, rxd->read.pkt_addr,
+ rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
}
} else if (cnt == 3) {
if (desc_n >= ring.count || desc_n < 0) {
@@ -822,27 +809,29 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"descriptor %d not found\n", desc_n);
return;
}
- if (is_rx_ring)
- ds = I40E_RX_DESC(&ring, desc_n);
- else
- ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
- if ((sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
- dev_info(&pf->pdev->dev,
- "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
- vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
- else
+ if (!is_rx_ring) {
+ txd = I40E_TX_DESC(&ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ txd->buffer_addr, txd->cmd_type_offset_bsz);
+ } else if (sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) {
+ rxd = I40E_RX_DESC(&ring, desc_n);
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
+ } else {
+ rxd = I40E_RX_DESC(&ring, desc_n);
dev_info(&pf->pdev->dev,
"vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id,
- desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
- ds->read.rsvd1, ds->read.rsvd2);
+ vsi_seid, ring_id, desc_n,
+ rxd->read.pkt_addr, rxd->read.hdr_addr,
+ rxd->read.rsvd1, rxd->read.rsvd2);
+ }
} else {
- if (is_rx_ring)
- dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
- else
- dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
}
@@ -854,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
i, pf->vsi[i]->seid);
@@ -873,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
" rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
dev_info(&pf->pdev->dev,
- " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
- estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+ estats->rx_broadcast, estats->rx_discards);
dev_info(&pf->pdev->dev,
- " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
- estats->rx_missed, estats->rx_unknown_protocol,
- estats->tx_bytes);
+ " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_unknown_protocol, estats->tx_bytes);
dev_info(&pf->pdev->dev,
" tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -988,8 +976,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
veb = i40e_dbg_find_veb(pf, seid);
if (!veb) {
- dev_info(&pf->pdev->dev,
- "%d: can't find veb\n", seid);
+ dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
@@ -1015,6 +1002,24 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
}
}
+/**
+ * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
+ * @pf: the pf that would be altered
+ * @flag: flag that needs enabling or disabling
+ * @enable: Enable/disable FD SD/ATR
+ **/
+static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
+{
+ if (enable) {
+ pf->flags |= flag;
+ } else {
+ pf->flags &= ~flag;
+ pf->auto_disable_flags |= flag;
+ }
+ dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+}
+
#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
/**
* i40e_dbg_command_write - write into command datum
@@ -1028,11 +1033,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied;
struct i40e_vsi *vsi;
- u8 *print_buf_start;
- u8 *print_buf;
- char *cmd_buf;
int vsi_seid;
int veb_seid;
int cnt;
@@ -1051,10 +1054,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count -= bytes_not_copied;
cmd_buf[count] = '\0';
- print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
- if (!print_buf_start)
- goto command_write_done;
- print_buf = print_buf_start;
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
if (strncmp(cmd_buf, "add vsi", 7) == 0) {
vsi_seid = -1;
@@ -1107,7 +1111,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
- "add relay: vsi VSI %d not found\n", vsi_seid);
+ "add relay: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
@@ -1157,9 +1161,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
- u8 ma[6];
- int vlan = 0;
struct i40e_mac_filter *f;
+ int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1199,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
- u8 ma[6];
int vlan = 0;
+ u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1236,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- int v;
- u16 vid;
i40e_status ret;
+ u16 vid;
+ int v;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1464,21 +1468,25 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->msg_enable);
}
} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
- dev_info(&pf->pdev->dev, "forcing PFR\n");
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "corer", 5) == 0) {
- dev_info(&pf->pdev->dev, "forcing CoreR\n");
- i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+ dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "globr", 5) == 0) {
- dev_info(&pf->pdev->dev, "forcing GlobR\n");
- i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+ dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "empr", 4) == 0) {
+ dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
+ i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
- cnt = sscanf(&cmd_buf[4], "%x", &address);
+ cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
goto command_write_done;
@@ -1497,7 +1505,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
- cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
goto command_write_done;
@@ -1515,10 +1523,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
address, value);
} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
- cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
@@ -1542,32 +1550,152 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
}
+ } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[11],
+ "%hx %hx %hx %hx %x %x %x %x %x %x",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "send aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ kfree(desc);
+ desc = NULL;
+ } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ struct i40e_aq_desc *desc;
+ i40e_status ret;
+ u16 buffer_len;
+ u8 *buff;
+
+ desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ if (!desc)
+ goto command_write_done;
+ cnt = sscanf(&cmd_buf[20],
+ "%hx %hx %hx %hx %x %x %x %x %x %x %hd",
+ &desc->flags,
+ &desc->opcode, &desc->datalen, &desc->retval,
+ &desc->cookie_high, &desc->cookie_low,
+ &desc->params.internal.param0,
+ &desc->params.internal.param1,
+ &desc->params.internal.param2,
+ &desc->params.internal.param3,
+ &buffer_len);
+ if (cnt != 11) {
+ dev_info(&pf->pdev->dev,
+ "send indirect aq_cmd: bad command string, cnt=%d\n",
+ cnt);
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ /* Just stub a buffer big enough in case user messed up */
+ if (buffer_len == 0)
+ buffer_len = 1280;
+
+ buff = kzalloc(buffer_len, GFP_KERNEL);
+ if (!buff) {
+ kfree(desc);
+ desc = NULL;
+ goto command_write_done;
+ }
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ ret = i40e_asq_send_command(&pf->hw, desc, buff,
+ buffer_len, NULL);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
+ } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x AQ Error: %d\n",
+ desc->opcode, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "AQ command send failed Opcode %x Status: %d\n",
+ desc->opcode, ret);
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ desc->flags, desc->opcode, desc->datalen, desc->retval,
+ desc->cookie_high, desc->cookie_low,
+ desc->params.internal.param0,
+ desc->params.internal.param1,
+ desc->params.internal.param2,
+ desc->params.internal.param3);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, buffer_len, true);
+ kfree(buff);
+ buff = NULL;
+ kfree(desc);
+ desc = NULL;
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_data fd_data;
- int ret;
+ struct i40e_fdir_filter fd_data;
u16 packet_len, i, j = 0;
char *asc_packet;
+ u8 *raw_packet;
bool add = false;
+ int ret;
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ goto command_write_done;
+
+ if (strncmp(cmd_buf, "add", 3) == 0)
+ add = true;
+
+ if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+ goto command_write_done;
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
GFP_KERNEL);
if (!asc_packet)
goto command_write_done;
- fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
- GFP_KERNEL);
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
+ GFP_KERNEL);
- if (!fd_data.raw_packet) {
+ if (!raw_packet) {
kfree(asc_packet);
asc_packet = NULL;
goto command_write_done;
}
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
&fd_data.q_index,
&fd_data.flex_off, &fd_data.pctype,
&fd_data.dest_vsi, &fd_data.dest_ctl,
@@ -1579,42 +1707,42 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt);
kfree(asc_packet);
asc_packet = NULL;
- kfree(fd_data.raw_packet);
+ kfree(raw_packet);
goto command_write_done;
}
/* fix packet length if user entered 0 */
if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+ packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
/* make sure to check the max as well */
packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+ packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
- dev_info(&pf->pdev->dev, "FD raw packet:\n");
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
- &fd_data.raw_packet[i]);
+ &raw_packet[i]);
j += 3;
- snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
}
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
- ret = i40e_program_fdir_filter(&fd_data, pf, add);
+ dev_info(&pf->pdev->dev, "FD raw packet dump\n");
+ print_hex_dump(KERN_INFO, "FD raw packet: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ raw_packet, packet_len, true);
+ ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
if (!ret) {
dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
} else {
dev_info(&pf->pdev->dev,
"Filter command send failed %d\n", ret);
}
- kfree(fd_data.raw_packet);
- fd_data.raw_packet = NULL;
+ kfree(raw_packet);
+ raw_packet = NULL;
kfree(asc_packet);
asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
+ } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
+ i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1625,8 +1753,35 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, true, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Add Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
+ ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
+ pf->hw.mac.addr,
+ I40E_ETH_P_LLDP, 0,
+ pf->vsi[pf->lan_vsi]->seid,
+ 0, false, NULL, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
+ __func__, pf->hw.aq.asq_last_status);
+ /* Continue and start FW LLDP anyway */
+ }
+
ret = i40e_aq_start_lldp(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1634,11 +1789,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
pf->hw.aq.asq_last_status);
goto command_write_done;
}
+#ifdef CONFIG_I40E_DCB
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) {
- int ret, i;
- u8 *buff;
u16 llen, rlen;
+ int ret;
+ u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1655,23 +1814,16 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (local) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
- int ret, i;
- u8 *buff;
u16 llen, rlen;
+ int ret;
+ u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1689,17 +1841,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
goto command_write_done;
}
- dev_info(&pf->pdev->dev,
- "Get LLDP MIB (remote) AQ buffer written back:\n");
- for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
- snprintf(print_buf, 3, "%02x ", buff[i]);
- print_buf += 3;
- if ((i % 16) == 15) {
- snprintf(print_buf, 1, "\n");
- print_buf++;
- }
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
+ print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, I40E_LLDPDU_SIZE, true);
kfree(buff);
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
@@ -1724,7 +1869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
}
} else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
- u16 buffer_len, i, bytes;
+ u16 buffer_len, bytes;
u16 module;
u32 offset;
u16 *buff;
@@ -1747,11 +1892,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- /* Read at least 512 words */
- if (buffer_len == 0)
- buffer_len = 512;
+ /* set the max length */
+ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
bytes = 2 * buffer_len;
+
+ /* read at least 1kB, no more than 4kB */
+ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
buff = kzalloc(bytes, GFP_KERNEL);
if (!buff)
goto command_write_done;
@@ -1776,16 +1923,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev,
"Read NVM module=0x%x offset=0x%x words=%d\n",
module, offset, buffer_len);
- for (i = 0; i < buffer_len; i++) {
- if ((i % 16) == 0) {
- snprintf(print_buf, 11, "\n0x%08x: ",
- offset + i);
- print_buf += 11;
- }
- snprintf(print_buf, 5, "%04x ", buff[i]);
- print_buf += 5;
- }
- dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ if (bytes)
+ print_hex_dump(KERN_INFO, "NVM Dump: ",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ buff, bytes, true);
}
kfree(buff);
buff = NULL;
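The hand-rolled snprintf() hex loops removed throughout this file are replaced by print_hex_dump(). The following userspace-only sketch roughly approximates the DUMP_PREFIX_OFFSET row layout it produces (16 bytes per row, 1- or 2-byte groups); it is not the kernel helper and omits its optional ASCII column:

    /* Rough userspace approximation of print_hex_dump(..., DUMP_PREFIX_OFFSET,
     * 16, groupsize, buf, len, ...); illustration only, not the kernel API. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static void dump_offset(const uint8_t *p, size_t len, size_t groupsize)
    {
        for (size_t off = 0; off < len; off += 16) {
            printf("%08zx: ", off);
            for (size_t i = off; i < off + 16 && i < len; i += groupsize) {
                if (groupsize == 2 && i + 1 < len)
                    printf("%04x ", (unsigned)(p[i] | p[i + 1] << 8));
                else
                    printf("%02x ", p[i]);
            }
            putchar('\n');
        }
    }

    int main(void)
    {
        uint8_t buf[40];

        for (size_t i = 0; i < sizeof(buf); i++)
            buf[i] = (uint8_t)i;
        dump_offset(buf, sizeof(buf), 2);   /* like the "NVM Dump" above */
        dump_offset(buf, sizeof(buf), 1);   /* like the raw packet/LLDP dumps */
        return 0;
    }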
@@ -1815,8 +1956,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
+ dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " fd-atr off\n");
+ dev_info(&pf->pdev->dev, " fd-atr on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
@@ -1829,9 +1974,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
command_write_done:
kfree(cmd_buf);
cmd_buf = NULL;
- kfree(print_buf_start);
- print_buf = NULL;
- print_buf_start = NULL;
return count;
}
@@ -1903,6 +2045,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
struct i40e_pf *pf = filp->private_data;
int bytes_not_copied;
struct i40e_vsi *vsi;
+ char *buf_tmp;
int vsi_seid;
int i, cnt;
@@ -1921,6 +2064,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count -= bytes_not_copied;
i40e_dbg_netdev_ops_buf[count] = '\0';
+ buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+ if (buf_tmp) {
+ *buf_tmp = '\0';
+ count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+ }
+
if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
@@ -1931,9 +2080,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"tx_timeout: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+ dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "tx_timeout called\n");
@@ -1952,9 +2105,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"change_mtu: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
mtu);
rtnl_unlock();
@@ -1973,9 +2127,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev,
"set_rx_mode: VSI %d not found\n", vsi_seid);
- goto netdev_ops_write_done;
- }
- if (rtnl_trylock()) {
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
+ vsi_seid);
+ } else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "set_rx_mode called\n");
@@ -1993,11 +2148,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
if (!vsi) {
dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
vsi_seid);
- goto netdev_ops_write_done;
+ } else if (!vsi->netdev) {
+ dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
+ vsi_seid);
+ } else {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i]->napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
}
- for (i = 0; i < vsi->num_q_vectors; i++)
- napi_schedule(&vsi->q_vectors[i].napi);
- dev_info(&pf->pdev->dev, "napi called\n");
} else {
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
@@ -2024,21 +2182,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile __attribute__((unused));
+ struct dentry *pfile;
const char *name = pci_name(pf->pdev);
+ const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (pf->i40e_dbg_pf) {
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_command_fops);
- pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_dump_fops);
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
- pf, &i40e_dbg_netdev_ops_fops);
- } else {
- dev_info(&pf->pdev->dev,
- "debugfs entry for %s failed\n", name);
- }
+ if (!pf->i40e_dbg_pf)
+ return;
+
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ if (!pfile)
+ goto create_failed;
+
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
+ if (!pfile)
+ goto create_failed;
+
+ return;
+
+create_failed:
+ dev_info(dev, "debugfs dir/file for %s failed\n", name);
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ return;
}
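The reworked init path above relies on a single create_failed label plus debugfs_remove_recursive() to unwind whatever was created before the failure. A generic userspace sketch of that single-cleanup-label pattern, with hypothetical resources standing in for the debugfs files:

    /* Userspace sketch of the single-cleanup-label pattern; malloc() stands
     * in for debugfs_create_file(), and free(NULL) is a no-op much like
     * removing a directory whose children were never created. */
    #include <stdio.h>
    #include <stdlib.h>

    static int setup_three(void)
    {
        char *a = NULL, *b = NULL, *c = NULL;

        a = malloc(32);
        if (!a)
            goto create_failed;
        b = malloc(32);
        if (!b)
            goto create_failed;
        c = malloc(32);
        if (!c)
            goto create_failed;

        printf("all three resources created\n");
        free(c);
        free(b);
        free(a);
        return 0;

    create_failed:
        free(c);
        free(b);
        free(a);
        fprintf(stderr, "setup failed, partial state unwound\n");
        return -1;
    }

    int main(void)
    {
        return setup_three() ? 1 : 0;
    }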
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index de255143bde..56438bd579e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -68,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
- {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
- {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
- {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
- {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1,
+ I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+ I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+ I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1,
+ I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1,
+ I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -94,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
u32 reg, mask;
u32 i, j;
- for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ !ret_code; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
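The register test walks a table of {offset, mask, elements, stride} entries, with elements now sized from func_caps at runtime. A small userspace sketch of the address walk only (made-up offsets, not the real XL710 register map, and no MMIO):

    /* Userspace sketch of the table-driven register address walk above. */
    #include <stdio.h>
    #include <stdint.h>

    struct reg_test_info {
        uint32_t offset;
        uint32_t mask;
        uint32_t elements;
        uint32_t stride;
    };

    int main(void)
    {
        struct reg_test_info list[] = {
            { 0x00038000, 0x0000FFBF, 4, 0x4 },  /* e.g. a per-queue control reg */
            { 0x00038800, 0x00000FFF, 1, 0x0 },  /* a single register */
            { 0 }
        };

        for (unsigned int i = 0; list[i].offset != 0; i++) {
            for (uint32_t j = 0; j < list[i].elements; j++) {
                uint32_t reg = list[i].offset + j * list[i].stride;

                printf("test reg 0x%08x with mask 0x%08x\n", reg, list[i].mask);
            }
        }
        return 0;
    }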
@@ -119,7 +142,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
- if ((!ret_code) &&
+ if (!ret_code &&
((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
(0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
ret_code = i40e_validate_nvm_checksum(hw, NULL);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index 3d98277f452..0b591165208 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -31,10 +30,10 @@
#include "i40e_type.h"
enum i40e_lb_mode {
- I40E_LB_MODE_NONE = 0,
- I40E_LB_MODE_PHY_LOCAL,
- I40E_LB_MODE_PHY_REMOTE,
- I40E_LB_MODE_MAC_LOCAL,
+ I40E_LB_MODE_NONE = 0x0,
+ I40E_LB_MODE_PHY_LOCAL = I40E_AQ_LB_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL = I40E_AQ_LB_MAC_LOCAL,
};
struct i40e_diag_reg_test_info {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76..4a488ffcd6b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -47,6 +46,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_veb, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -57,12 +58,39 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(tx_errors),
I40E_NETDEV_STAT(rx_dropped),
I40E_NETDEV_STAT(tx_dropped),
- I40E_NETDEV_STAT(multicast),
I40E_NETDEV_STAT(collisions),
I40E_NETDEV_STAT(rx_length_errors),
I40E_NETDEV_STAT(rx_crc_errors),
};
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+ I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+ I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+ I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+ I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+ I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+ I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+};
+
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd);
+
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
* but they are separate. This device supports Virtualization, and
* as such might have several netdevs supporting VMDq and FCoE going
@@ -76,7 +104,12 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -85,6 +118,8 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -109,14 +144,26 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+
+ /* LPI stats */
+ I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
+ I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
+ I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
+ I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};
#define I40E_QUEUE_STATS_LEN(n) \
- ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
- ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+ (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+ * 2 /* Tx and Rx together */ \
+ * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -125,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
/ sizeof(u64))
+#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
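The reworked I40E_QUEUE_STATS_LEN() counts two rings per queue pair times the number of u64 counters per ring. A userspace sketch of the resulting arithmetic, assuming the per-ring stats struct carries two u64 counters (packets and bytes) and using example array sizes:

    /* Userspace sketch of the ethtool stats-count arithmetic; the per-ring
     * struct is assumed to carry two u64 counters, and the netdev array
     * size below is an example rather than the real ARRAY_SIZE() value. */
    #include <stdio.h>
    #include <stdint.h>

    struct queue_stats { uint64_t packets, bytes; };

    int main(void)
    {
        unsigned int netdev_stats = 10;     /* example i40e_gstrings_net_stats size */
        unsigned int misc_stats = 7;        /* i40e_gstrings_misc_stats entries */
        unsigned int num_queue_pairs = 8;   /* example VSI */
        unsigned int queue_stats;

        queue_stats = num_queue_pairs * 2   /* Tx and Rx together */
                      * (sizeof(struct queue_stats) / sizeof(uint64_t));

        printf("VSI ethtool stats entries: %u\n",
               netdev_stats + misc_stats + queue_stats);
        return 0;
    }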
@@ -193,28 +241,48 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->supported = SUPPORTED_10000baseKR_Full;
ecmd->advertising = ADVERTISED_10000baseKR_Full;
break;
- case I40E_PHY_TYPE_10GBASE_T:
default:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full;
+ if (i40e_is_40G_device(hw->device_id)) {
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
break;
}
- /* for now just say autoneg all the time */
ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
- if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ switch (hw->phy.media_type) {
+ case I40E_MEDIA_TYPE_BACKPLANE:
ecmd->supported |= SUPPORTED_Backplane;
ecmd->advertising |= ADVERTISED_Backplane;
ecmd->port = PORT_NONE;
- } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ break;
+ case I40E_MEDIA_TYPE_BASET:
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- } else {
+ break;
+ case I40E_MEDIA_TYPE_DA:
+ case I40E_MEDIA_TYPE_CX4:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_DA;
+ break;
+ case I40E_MEDIA_TYPE_FIBER:
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
+ break;
+ case I40E_MEDIA_TYPE_UNKNOWN:
+ default:
+ ecmd->port = PORT_OTHER;
+ break;
}
ecmd->transceiver = XCVR_EXTERNAL;
@@ -256,12 +324,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
- pause->rx_pause = 0;
- pause->tx_pause = 0;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
+ pause->rx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
+ pause->tx_pause = 1;
+ } else if (hw->fc.current_mode == I40E_FC_FULL) {
pause->rx_pause = 1;
- if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
pause->tx_pause = 1;
+ }
}
static u32 i40e_get_msglevel(struct net_device *netdev)
@@ -329,38 +399,56 @@ static int i40e_get_eeprom(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- int first_word, last_word;
- u16 i, eeprom_len;
- u16 *eeprom_buff;
- int ret_val = 0;
-
+ struct i40e_pf *pf = np->vsi->back;
+ int ret_val = 0, len;
+ u8 *eeprom_buff;
+ u16 i, sectors;
+ bool last;
+#define I40E_NVM_SECTOR_SIZE 4096
if (eeprom->len == 0)
return -EINVAL;
eeprom->magic = hw->vendor_id | (hw->device_id << 16);
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_len = last_word - first_word + 1;
-
- eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
- ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
- eeprom_buff);
- if (eeprom_len == 0) {
- kfree(eeprom_buff);
- return -EACCES;
+ ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto free_buff;
}
- /* Device's eeprom is always little-endian, word addressable */
- for (i = 0; i < eeprom_len; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
+ sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
+ len = I40E_NVM_SECTOR_SIZE;
+ last = false;
+ for (i = 0; i < sectors; i++) {
+ if (i == (sectors - 1)) {
+ len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
+ last = true;
+ }
+ ret_val = i40e_aq_read_nvm(hw, 0x0,
+ eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
+ len,
+ eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+ last, NULL);
+ if (ret_val) {
+ dev_info(&pf->pdev->dev,
+ "read NVM failed err=%d status=0x%x\n",
+ ret_val, hw->aq.asq_last_status);
+ goto release_nvm;
+ }
+ }
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+release_nvm:
+ i40e_release_nvm(hw);
+ memcpy(bytes, eeprom_buff, eeprom->len);
+free_buff:
kfree(eeprom_buff);
-
return ret_val;
}
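The new EEPROM read path above splits the request into 4 KB admin-queue reads and flags the final chunk as last. A userspace sketch of that chunking arithmetic (sector size mirrored from the local define, request values made up):

    /* Userspace sketch of the 4 KB sector chunking used by the eeprom read
     * above: how many admin-queue reads a request turns into and which
     * (offset, len, last) each one would get.  Arithmetic only. */
    #include <stdio.h>
    #include <stdbool.h>

    #define NVM_SECTOR_SIZE 4096   /* mirrors I40E_NVM_SECTOR_SIZE above */

    int main(void)
    {
        unsigned int req_offset = 0x200;
        unsigned int req_len = 10000;       /* not a multiple of 4096 */

        unsigned int sectors = req_len / NVM_SECTOR_SIZE;
        sectors += (req_len % NVM_SECTOR_SIZE) ? 1 : 0;

        for (unsigned int i = 0; i < sectors; i++) {
            unsigned int len = NVM_SECTOR_SIZE;
            bool last = false;

            if (i == sectors - 1) {
                len = req_len - NVM_SECTOR_SIZE * i;
                last = true;
            }
            printf("chunk %u: offset 0x%x len %u last=%d\n",
                   i, req_offset + NVM_SECTOR_SIZE * i, len, last);
        }
        return 0;
    }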
@@ -368,8 +456,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
-
- return hw->nvm.sr_size * 2;
+ u32 val;
+
+ val = (rd32(hw, I40E_GLPCI_LBARCTRL)
+ & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
+ >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
+ /* register reports the size as a power-of-2 count of 64KB chunks */
+ val = (64 * 1024) * (1 << val);
+ return val;
}
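i40e_get_eeprom_len() now decodes the flash size from the FL_SIZE field as a power-of-two count of 64 KB chunks. A tiny userspace sketch of that decode over a few example field values (the field width is assumed here purely for illustration):

    /* Userspace sketch of the flash-size decode above. */
    #include <stdio.h>

    int main(void)
    {
        for (unsigned int field = 0; field <= 7; field++) {
            unsigned int bytes = (64 * 1024) * (1u << field);

            printf("FL_SIZE=%u -> %u bytes (%u KB)\n",
                   field, bytes, bytes / 1024);
        }
        return 0;
    }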
static void i40e_get_drvinfo(struct net_device *netdev,
@@ -399,8 +493,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
- ring->rx_pending = vsi->rx_rings[0].count;
- ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_pending = vsi->rx_rings[0]->count;
+ ring->tx_pending = vsi->tx_rings[0]->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
@@ -418,19 +512,23 @@ static int i40e_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
+ ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+ ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
+ netdev_info(netdev,
+ "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+ ring->tx_pending, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- I40E_MIN_NUM_DESCRIPTORS,
- I40E_MAX_NUM_DESCRIPTORS);
- new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
+ new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == vsi->tx_rings[0].count) &&
- (new_rx_count == vsi->rx_rings[0].count))
+ if ((new_tx_count == vsi->tx_rings[0]->count) &&
+ (new_rx_count == vsi->rx_rings[0]->count))
return 0;
while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +537,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (!netif_running(vsi->netdev)) {
/* simple case - set for the next time the netdev is started */
for (i = 0; i < vsi->num_queue_pairs; i++) {
- vsi->tx_rings[i].count = new_tx_count;
- vsi->rx_rings[i].count = new_rx_count;
+ vsi->tx_rings[i]->count = new_tx_count;
+ vsi->rx_rings[i]->count = new_rx_count;
}
goto done;
}
@@ -451,10 +549,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
/* alloc updated Tx resources */
- if (new_tx_count != vsi->tx_rings[0].count) {
+ if (new_tx_count != vsi->tx_rings[0]->count) {
netdev_info(netdev,
"Changing Tx descriptor count from %d to %d.\n",
- vsi->tx_rings[0].count, new_tx_count);
+ vsi->tx_rings[0]->count, new_tx_count);
tx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!tx_rings) {
@@ -464,7 +562,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
@@ -481,10 +579,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
/* alloc updated Rx resources */
- if (new_rx_count != vsi->rx_rings[0].count) {
+ if (new_rx_count != vsi->rx_rings[0]->count) {
netdev_info(netdev,
"Changing Rx descriptor count from %d to %d\n",
- vsi->rx_rings[0].count, new_rx_count);
+ vsi->rx_rings[0]->count, new_rx_count);
rx_rings = kcalloc(vsi->alloc_queue_pairs,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!rx_rings) {
@@ -494,7 +592,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
for (i = 0; i < vsi->num_queue_pairs; i++) {
/* clone ring and setup updated count */
- rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
@@ -517,8 +615,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (tx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_tx_resources(&vsi->tx_rings[i]);
- vsi->tx_rings[i] = tx_rings[i];
+ i40e_free_tx_resources(vsi->tx_rings[i]);
+ *vsi->tx_rings[i] = tx_rings[i];
}
kfree(tx_rings);
tx_rings = NULL;
@@ -526,8 +624,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_free_rx_resources(&vsi->rx_rings[i]);
- vsi->rx_rings[i] = rx_rings[i];
+ i40e_free_rx_resources(vsi->rx_rings[i]);
+ *vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
rx_rings = NULL;
@@ -560,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi])
- return I40E_PF_STATS_LEN(netdev);
- else
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ int len = I40E_PF_STATS_LEN(netdev);
+
+ if (pf->lan_veb != I40E_NO_VEB)
+ len += I40E_VEB_STATS_LEN;
+ return len;
+ } else {
return I40E_VSI_STATS_LEN(netdev);
+ }
default:
return -EOPNOTSUPP;
}
@@ -573,12 +676,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
int i = 0;
char *p;
int j;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+ unsigned int start;
i40e_update_stats(vsi);
@@ -587,31 +692,63 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->tx_rings[j].tx_stats.packets;
- data[i++] = vsi->tx_rings[j].tx_stats.bytes;
+ for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
+ p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
- data[i++] = vsi->rx_rings[j].rx_stats.packets;
- data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+
+ if (!tx_ring)
+ continue;
+
+ /* process Tx ring statistics */
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+ data[i] = tx_ring->stats.packets;
+ data[i + 1] = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ i += 2;
+
+ /* Rx ring is the 2nd half of the queue pair */
+ rx_ring = &tx_ring[1];
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+ data[i] = rx_ring->stats.packets;
+ data[i + 1] = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ i += 2;
}
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_tx[j];
- data[i++] = pf->stats.priority_xoff_tx[j];
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_rx[j];
- data[i++] = pf->stats.priority_xoff_rx[j];
+ rcu_read_unlock();
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+ p = (char *)veb;
+ p += i40e_gstrings_veb_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
- data[i++] = pf->stats.priority_xon_2_xoff[j];
}
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -636,46 +773,57 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_net_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_misc_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
p += ETH_GSTRING_LEN;
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
- i40e_gstrings_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "veb.%s",
+ i40e_gstrings_veb_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
@@ -684,11 +832,44 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
static int i40e_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
- return ethtool_op_get_ts_info(dev, info);
+ struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pf->ptp_clock)
+ info->phc_index = ptp_clock_index(pf->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
}
-static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+static int i40e_link_test(struct net_device *netdev, u64 *data)
{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "link test\n");
if (i40e_get_link_status(&pf->hw))
*data = 0;
else
@@ -697,36 +878,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)
return *data;
}
-static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+static int i40e_reg_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_reg_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "register test\n");
+ *data = i40e_diag_reg_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+static int i40e_eeprom_test(struct net_device *netdev, u64 *data)
{
- i40e_status ret;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
- ret = i40e_diag_eeprom_test(&pf->hw);
- *data = ret;
+ netif_info(pf, hw, netdev, "eeprom test\n");
+ *data = i40e_diag_eeprom_test(&pf->hw);
- return ret;
+ return *data;
}
-static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+static int i40e_intr_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ u16 swc_old = pf->sw_int_count;
+
+ netif_info(pf, hw, netdev, "interrupt test\n");
+ wr32(&pf->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ usleep_range(1000, 2000);
+ *data = (swc_old == pf->sw_int_count);
return *data;
}
-static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
- *data = -ENOSYS;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ netif_info(pf, hw, netdev, "loopback test not implemented\n");
+ *data = 0;
return *data;
}
@@ -737,42 +933,38 @@ static void i40e_diag_test(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- set_bit(__I40E_TESTING, &pf->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
+ netif_info(pf, drv, netdev, "offline testing starting\n");
- netdev_info(netdev, "offline testing starting\n");
+ set_bit(__I40E_TESTING, &pf->state);
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
*/
- netdev_info(netdev, "link test starting\n");
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- netdev_info(netdev, "register test starting\n");
- if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "eeprom test starting\n");
- if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "interrupt test starting\n");
- if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
- netdev_info(netdev, "loopback test starting\n");
- if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ /* run reg test last, a reset is required after it */
+ if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
} else {
- netdev_info(netdev, "online test starting\n");
/* Online tests */
- if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ netif_info(pf, drv, netdev, "online testing starting\n");
+
+ if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* Offline only tests, not run in online; pass by default */
@@ -780,16 +972,53 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
-
- clear_bit(__I40E_TESTING, &pf->state);
}
+
+ netif_info(pf, drv, netdev, "testing finished\n");
}
static void i40e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
- wol->supported = 0;
- wol->wolopts = 0;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if ((1 << hw->port) & wol_nvm_bits) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ } else {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0);
+ }
+}
+
+static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 wol_nvm_bits;
+
+ /* NVM bit on means WoL disabled for the port */
+ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+ if (((1 << hw->port) & wol_nvm_bits))
+ return -EOPNOTSUPP;
+
+ /* only magic packet is supported */
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ /* is this a new value? */
+ if (pf->wol_en != !!wol->wolopts) {
+ pf->wol_en = !!wol->wolopts;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+ }
+
+ return 0;
}
static int i40e_nway_reset(struct net_device *netdev)
@@ -823,13 +1052,13 @@ static int i40e_set_phys_id(struct net_device *netdev,
pf->led_status = i40e_led_get(hw);
return blink_freq;
case ETHTOOL_ID_ON:
- i40e_led_set(hw, 0xF);
+ i40e_led_set(hw, 0xF, false);
break;
case ETHTOOL_ID_OFF:
- i40e_led_set(hw, 0x0);
+ i40e_led_set(hw, 0x0, false);
break;
case ETHTOOL_ID_INACTIVE:
- i40e_led_set(hw, pf->led_status);
+ i40e_led_set(hw, pf->led_status, false);
break;
}
@@ -851,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
@@ -877,41 +1105,31 @@ static int i40e_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
- break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
- break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- break;
- default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
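The coalescing rework stores the interval in the low bits of *_itr_setting and keeps adaptive mode as an OR'd flag bit that get_coalesce masks back off. A userspace sketch of that encoding; the 0x8000 flag value is an assumption for illustration, not taken from the driver headers:

    /* Userspace sketch of the ITR setting encoding used above. */
    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ITR_DYNAMIC 0x8000u    /* assumed flag value, illustration only */

    static uint16_t encode_itr(uint16_t usecs, bool adaptive)
    {
        return usecs | (adaptive ? ITR_DYNAMIC : 0);
    }

    int main(void)
    {
        uint16_t setting = encode_itr(62, true);

        printf("stored setting   = 0x%04x\n", setting);
        printf("coalesce usecs   = %u\n", setting & ~ITR_DYNAMIC);
        printf("use_adaptive     = %d\n", !!(setting & ITR_DYNAMIC));
        return 0;
    }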
@@ -965,6 +1183,92 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
}
/**
+ * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ * @rule_locs: Array of used rule locations
+ *
+ * This function populates both the total and actual rule count of
+ * the ethtool flow classification command
+ *
+ * Returns 0 on success or -EMSGSIZE if there are more rules than fit
+ * in the supplied rule_locs array
+ **/
+static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_fdir_filter *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->fd_id;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+/**
+ * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function looks up a filter based on the Rx flow classification
+ * command and fills the flow spec info for it if found
+ *
+ * Returns 0 on success or -EINVAL if filter not found
+ **/
+static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct i40e_fdir_filter *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->fd_id)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->fd_id)
+ return -EINVAL;
+
+ fsp->flow_type = rule->flow_type;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
+ /* Reverse the src and dest notion, since the HW views them from
+ * Tx perspective, whereas the user expects it from the Rx filter view.
+ */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+
+ if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+ return 0;
+}
+
+/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -988,14 +1292,17 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
ret = i40e_get_rss_hash_opts(pf, cmd);
break;
case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = pf->fdir_pf_active_filters;
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
- ret = 0;
+ ret = i40e_get_ethtool_fdir_entry(pf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- cmd->data = 500;
- ret = 0;
+ ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+ break;
default:
break;
}
@@ -1056,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -1074,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -1126,265 +1425,191 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return 0;
}
-#define IP_HEADER_OFFSET 14
/**
- * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_match_fdir_input_set - Match a new filter against an existing one
+ * @rule: The filter already added
+ * @input: The new filter to comapre against
*
- * Returns 0 if the filters were successfully added or removed
+ * Returns true if the two input set match
**/
-static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
+ struct i40e_fdir_filter *input)
{
- struct i40e_pf *pf = vsi->back;
- struct udphdr *udp;
- struct iphdr *ip;
- bool err = false;
- int ret;
- int i;
-
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
- udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
-
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
- udp->source = fsp->h_u.tcp_ip4_spec.psrc;
- udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
-
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
- }
-
- return err ? -EOPNOTSUPP : 0;
+ if ((rule->dst_ip[0] != input->dst_ip[0]) ||
+ (rule->src_ip[0] != input->src_ip[0]) ||
+ (rule->dst_port != input->dst_port) ||
+ (rule->src_port != input->src_port))
+ return false;
+ return true;
}
/**
- * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @input: The filter to update or NULL to indicate deletion
+ * @sw_idx: Software index to the filter
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function updates (or deletes) a Flow Director entry from
+ * the hlist of the corresponding PF
*
- * Returns 0 if the filters were successfully added or removed
+ * Returns 0 on success
**/
-static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input,
+ u16 sw_idx,
+ struct ethtool_rxnfc *cmd)
{
+ struct i40e_fdir_filter *rule, *parent;
struct i40e_pf *pf = vsi->back;
- struct tcphdr *tcp;
- struct iphdr *ip;
- bool err = false;
- int ret;
+ struct hlist_node *node2;
+ int err = -EINVAL;
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
- tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
+ parent = NULL;
+ rule = NULL;
- ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
- tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
-
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
+ hlist_for_each_entry_safe(rule, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->fd_id >= sw_idx)
+ break;
+ parent = rule;
+ }
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
+ /* if there is an old rule occupying our place, remove it */
+ if (rule && (rule->fd_id == sw_idx)) {
+ if (input && !i40e_match_fdir_input_set(rule, input))
+ err = i40e_add_del_fdir(vsi, rule, false);
+ else if (!input)
+ err = i40e_add_del_fdir(vsi, rule, false);
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ pf->fdir_pf_active_filters--;
}
- ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
- tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+ /* If no input, this was a delete; err should be 0 if a rule was
+ * successfully found and removed from the list, else -EINVAL
+ */
+ if (!input)
+ return err;
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
- ret = i40e_program_fdir_filter(fd_data, pf, add);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &pf->fdir_filter_list);
- return err ? -EOPNOTSUPP : 0;
-}
+ /* update counts */
+ pf->fdir_pf_active_filters++;
-/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
- *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
-{
- return -EOPNOTSUPP;
+ return 0;
}
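i40e_update_ethtool_fdir_entry() keeps the filter list sorted by fd_id: walk to the first entry whose id is >= the new one, drop any entry already holding that id, then link the new filter after the tracked parent (or at the head). A standalone userspace sketch of the same ordered insert/replace on a plain singly linked list (illustration only, not the driver's hlist code):

    /* Userspace sketch of the ordered insert/replace keyed by fd_id. */
    #include <stdio.h>
    #include <stdlib.h>

    struct filter {
        int fd_id;
        struct filter *next;
    };

    static struct filter *update_entry(struct filter *head, int fd_id)
    {
        struct filter *rule = head, *parent = NULL, *input;

        /* find insertion point: first rule with fd_id >= new id */
        while (rule && rule->fd_id < fd_id) {
            parent = rule;
            rule = rule->next;
        }

        /* an old rule occupying our slot is removed first */
        if (rule && rule->fd_id == fd_id) {
            if (parent)
                parent->next = rule->next;
            else
                head = rule->next;
            free(rule);
        }

        input = malloc(sizeof(*input));
        if (!input)
            return head;
        input->fd_id = fd_id;

        if (parent) {
            input->next = parent->next;
            parent->next = input;
        } else {
            input->next = head;
            head = input;
        }
        return head;
    }

    int main(void)
    {
        struct filter *head = NULL;

        head = update_entry(head, 5);
        head = update_entry(head, 1);
        head = update_entry(head, 5);   /* replaces the existing id 5 */

        for (struct filter *f = head; f; f = f->next)
            printf("fd_id %d\n", f->fd_id);
        return 0;
    }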
/**
- * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @fsp: the ethtool flow spec
- * @add: true adds a filter, false removes it
+ * i40e_del_fdir_entry - Deletes a Flow Director filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @cmd: The command to get or set Rx flow classification rules
*
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
- struct i40e_fdir_data *fd_data,
- struct ethtool_rx_flow_spec *fsp, bool add)
+ * The function removes a Flow Director filter entry from the
+ * hlist of the corresponding PF.
+ *
+ * Returns 0 on success
+ */
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
struct i40e_pf *pf = vsi->back;
- struct iphdr *ip;
- bool err = false;
- int ret;
- int i;
+ int ret = 0;
- ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
- ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
- ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
- ip->protocol = fsp->h_u.usr_ip4_spec.proto;
-
- for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
- }
-
- return err ? -EOPNOTSUPP : 0;
+ i40e_fdir_check_and_reenable(pf);
+ return ret;
}
/**
- * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
- * a specific flow spec based on their protocol
+ * i40e_add_fdir_ethtool - Add Flow Director filters
* @vsi: pointer to the targeted VSI
* @cmd: command to get or set RX flow classification rules
- * @add: true adds a filter, false removes it
*
- * Returns 0 if the filters were successfully added or removed
+ * Add Flow Director filters for a specific flow spec based on their
+ * protocol. Returns 0 if the filters were successfully added.
**/
-static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
- struct ethtool_rxnfc *cmd, bool add)
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd)
{
- struct i40e_fdir_data fd_data;
- int ret = -EINVAL;
+ struct ethtool_rx_flow_spec *fsp;
+ struct i40e_fdir_filter *input;
struct i40e_pf *pf;
- struct ethtool_rx_flow_spec *fsp =
- (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int ret = -EINVAL;
if (!vsi)
return -EINVAL;
pf = vsi->back;
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return -EOPNOTSUPP;
+
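+ /* sideband Flow Director gets auto-disabled when the HW filter table
+ * fills up, so report that there is no space for another filter
+ */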
+ if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+ return -ENOSPC;
+
+ fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
+ pf->hw.func_caps.fd_filters_guaranteed)) {
+ return -EINVAL;
+ }
+
if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
(fsp->ring_cookie >= vsi->num_queue_pairs))
return -EINVAL;
- /* Populate the Flow Director that we have at the moment
- * and allocate the raw packet buffer for the calling functions
- */
- fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
- GFP_KERNEL);
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!fd_data.raw_packet) {
- dev_info(&pf->pdev->dev, "Could not allocate memory\n");
+ if (!input)
return -ENOMEM;
- }
- fd_data.q_index = fsp->ring_cookie;
- fd_data.flex_off = 0;
- fd_data.pctype = 0;
- fd_data.dest_vsi = vsi->id;
- fd_data.dest_ctl = 0;
- fd_data.fd_status = 0;
- fd_data.cnt_index = 0;
- fd_data.fd_id = 0;
-
- switch (fsp->flow_type & ~FLOW_EXT) {
- case TCP_V4_FLOW:
- ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
- break;
- case UDP_V4_FLOW:
- ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
- break;
- case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
- break;
- case IPV4_FLOW:
- ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
- break;
- case IP_USER_FLOW:
- switch (fsp->h_u.usr_ip4_spec.proto) {
- case IPPROTO_TCP:
- ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
- break;
- case IPPROTO_UDP:
- ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
- break;
- case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
- break;
- default:
- ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
- break;
- }
- break;
- default:
- dev_info(&pf->pdev->dev, "Could not specify spec type\n");
- ret = -EINVAL;
- }
+ input->fd_id = fsp->location;
- kfree(fd_data.raw_packet);
- fd_data.raw_packet = NULL;
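+ /* ethtool's RX_CLS_FLOW_DISC action maps to the HW drop action;
+ * any other ring_cookie directs matching packets to that queue index
+ */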
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+ else
+ input->dest_ctl =
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+
+ input->q_index = fsp->ring_cookie;
+ input->flex_off = 0;
+ input->pctype = 0;
+ input->dest_vsi = vsi->id;
+ input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
+ input->cnt_index = pf->fd_sb_cnt_idx;
+ input->flow_type = fsp->flow_type;
+ input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
+
+ /* Reverse the src and dest notion, since the HW expects them to be from
+ * the Tx perspective, whereas the user input is from the Rx filter view.
+ */
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ ret = i40e_add_del_fdir(vsi, input, true);
+ if (ret)
+ kfree(input);
+ else
+ i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
return ret;
}
+
/**
* i40e_set_rxnfc - command to set RX flow classification rules
* @netdev: network interface device structure
@@ -1404,10 +1629,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
ret = i40e_set_rss_hash_opt(pf, cmd);
break;
case ETHTOOL_SRXCLSRLINS:
- ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+ ret = i40e_add_fdir_ethtool(vsi, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
- ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+ ret = i40e_del_fdir_entry(vsi, cmd);
break;
default:
break;
@@ -1416,6 +1641,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ret;
}
+/**
+ * i40e_max_channels - get Max number of combined channels supported
+ * @vsi: vsi pointer
+ **/
+static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
+{
+ /* TODO: This code assumes DCB and FD are disabled for now. */
+ return vsi->alloc_queue_pairs;
+}
+
+/**
+ * i40e_get_channels - Get the current and maximum supported channels
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate Tx and Rx queues as channels. other_count reports
+ * how many queues are used for control, and max_combined reports how many
+ * queue pairs we can support. Queue pairs may not map 1:1 to q_vectors, since
+ * we support many more queue pairs than q_vectors.
+ **/
+static void i40e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ /* report maximum channels */
+ ch->max_combined = i40e_max_channels(vsi);
+
+ /* report info for other vector */
+ ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
+ ch->max_other = ch->other_count;
+
+ /* Note: This code assumes DCB is disabled for now. */
+ ch->combined_count = vsi->num_queue_pairs;
+}
+
+/**
+ * i40e_set_channels - Set the new channel count
+ * @dev: network interface device structure
+ * @ch: ethtool channels structure
+ *
+ * The resulting channel count may differ from what the user requested,
+ * since it gets rounded down to a power of 2.
+ **/
+static int i40e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int new_count;
+
+ /* We do not support setting channels for any other VSI at present */
+ if (vsi->type != I40E_VSI_MAIN)
+ return -EINVAL;
+
+ /* verify they are not requesting separate Rx and Tx channel counts */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > i40e_max_channels(vsi))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ /* TODO: Flow director limit, DCB etc */
+
+ /* cap RSS limit */
+ if (count > pf->rss_size_max)
+ count = pf->rss_size_max;
+
+ /* use rss_reconfig to rebuild with new queue count and update traffic
+ * class queue mapping
+ */
+ new_count = i40e_reconfig_rss_queues(pf, count);
+ if (new_count > 0)
+ return 0;
+ else
+ return -EINVAL;
+}
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.get_drvinfo = i40e_get_drvinfo,
@@ -1424,6 +1737,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.nway_reset = i40e_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = i40e_get_wol,
+ .set_wol = i40e_set_wol,
.get_eeprom_len = i40e_get_eeprom_len,
.get_eeprom = i40e_get_eeprom,
.get_ringparam = i40e_get_ringparam,
@@ -1440,10 +1754,12 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_channels = i40e_get_channels,
+ .set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+ netdev->ethtool_ops = &i40e_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 901804af8b0..9b987ccc9e8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
+ i40e_status ret_code;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -90,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
sd_entry->u.pd_table.pd_entry =
(struct i40e_hmc_pd_entry *)
sd_entry->u.pd_table.pd_entry_virt_mem.va;
- memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.pd_table.pd_page_addr = mem;
} else {
- memcpy(&sd_entry->u.bp.addr, &mem,
- sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.addr = mem;
sd_entry->u.bp.sd_pd_index = sd_index;
}
/* initialize the sd entry */
@@ -165,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
if (ret_code)
goto exit;
- memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.addr = mem;
pd_entry->bp.sd_pd_index = pd_index;
pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
/* Set page address and valid bit */
@@ -204,7 +201,7 @@ exit:
**/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+ u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
@@ -240,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- if (is_pf)
- I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
- else
- I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index aacd42a261e..b45d8fedc5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -117,7 +116,6 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @pa: pointer to physical address
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
@@ -139,7 +137,6 @@ struct i40e_hmc_info {
* I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
* @hw: pointer to our hw struct
* @sd_index: segment descriptor index
- * @hmc_fn_id: hmc function id
* @type: if sd entry is direct or paged
**/
#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
@@ -160,18 +157,12 @@ struct i40e_hmc_info {
* @hw: pointer to our hw struct
* @sd_idx: segment descriptor index
* @pd_idx: page descriptor index
- * @hmc_fn_id: hmc function id
**/
#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
wr32((hw), I40E_PFHMC_PDINV, \
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -230,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index a695b91c9c7..870ab1ee072 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -398,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* remove the backing pages from pd_idx1 to i */
while (i && (i > pd_idx1)) {
i40e_remove_pd_bp(hw, info->hmc_info,
- (i - 1), true);
+ (i - 1));
i--;
}
}
@@ -434,11 +433,7 @@ exit_sd_error:
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++) {
- i40e_remove_pd_bp(
- hw,
- info->hmc_info,
- i,
- true);
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
}
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
@@ -486,8 +481,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
/* Make one big object, a single SD */
info.count = 1;
ret_code = i40e_create_lan_hmc_object(hw, &info);
- if ((ret_code) &&
- (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
goto try_type_paged;
else if (ret_code)
goto configure_lan_hmc_out;
@@ -618,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
pd_table =
&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry[rel_pd_idx].valid) {
- ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
- j, true);
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
if (ret_code)
goto exit;
}
@@ -749,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
{ 0 }
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 00ff3500607..eb65fe23c4a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -57,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
@@ -113,8 +113,8 @@ enum i40e_hmc_lan_object_size {
#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
#define I40E_HMC_OBJ_SIZE_TXQ 128
#define I40E_HMC_OBJ_SIZE_RXQ 32
-#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
-#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
enum i40e_hmc_lan_rsrc_type {
I40E_HMC_LAN_FULL = 0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa479501..275ca9a1719 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -27,6 +26,10 @@
/* Local includes */
#include "i40e.h"
+#include "i40e_diag.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -35,23 +38,25 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -61,16 +66,14 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
- {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
/* required last entry */
{0, }
};
@@ -301,6 +304,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ set_bit(__I40E_DOWN, &vsi->state);
i40e_down(vsi);
break;
}
@@ -347,14 +351,59 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
**/
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
- struct rtnl_link_stats64 *storage)
+ struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
+ struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ int i;
+
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
+ if (!vsi->tx_rings)
+ return stats;
+
+ rcu_read_lock();
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ u64 bytes, packets;
+ unsigned int start;
+
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ if (!tx_ring)
+ continue;
- *storage = *i40e_get_vsi_stats_struct(vsi);
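+ /* take a consistent packets/bytes snapshot; the seqcount loop
+ * retries if the ring stats were updated while we were reading
+ */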
+ do {
+ start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+ packets = tx_ring->stats.packets;
+ bytes = tx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
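+ /* the Rx ring is allocated right after its Tx partner in the same block */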
+ rx_ring = &tx_ring[1];
+
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+ packets = rx_ring->stats.packets;
+ bytes = rx_ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ rcu_read_unlock();
+
+ /* the following stats are updated by i40e_watchdog_subtask() */
+ stats->multicast = vsi_stats->multicast;
+ stats->tx_errors = vsi_stats->tx_errors;
+ stats->tx_dropped = vsi_stats->tx_dropped;
+ stats->rx_errors = vsi_stats->rx_errors;
+ stats->rx_crc_errors = vsi_stats->rx_crc_errors;
+ stats->rx_length_errors = vsi_stats->rx_length_errors;
- return storage;
+ return stats;
}
/**
@@ -374,13 +423,18 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
- if (vsi->rx_rings)
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
- memset(&vsi->rx_rings[i].rx_stats, 0 ,
- sizeof(vsi->rx_rings[i].rx_stats));
- memset(&vsi->tx_rings[i].tx_stats, 0,
- sizeof(vsi->tx_rings[i].tx_stats));
+ memset(&vsi->rx_rings[i]->stats, 0 ,
+ sizeof(vsi->rx_rings[i]->stats));
+ memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+ sizeof(vsi->rx_rings[i]->rx_stats));
+ memset(&vsi->tx_rings[i]->stats, 0 ,
+ sizeof(vsi->tx_rings[i]->stats));
+ memset(&vsi->tx_rings[i]->tx_stats, 0,
+ sizeof(vsi->tx_rings[i]->tx_stats));
}
+ }
vsi->stat_offsets_loaded = false;
}
@@ -415,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
{
u64 new_data;
- if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ if (hw->device_id == I40E_DEV_ID_QEMU) {
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
} else {
@@ -474,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_discards, &es->rx_discards);
+ i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
I40E_GLV_GORCL(stat_idx),
@@ -531,10 +591,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
veb->stat_offsets_loaded,
&oes->tx_discards, &es->tx_discards);
- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
- veb->stat_offsets_loaded,
- &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
veb->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
@@ -591,14 +652,14 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
return;
/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
}
}
@@ -645,14 +706,14 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *ring = &vsi->tx_rings[i];
+ struct i40e_ring *ring = vsi->tx_rings[i];
tc = ring->dcb_tc;
if (xoff[tc])
@@ -663,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/**
- * i40e_update_stats - Update the board statistics counters.
+ * i40e_update_vsi_stats - Update the vsi statistics counters.
* @vsi: the VSI to be updated
*
* There are a few instances where we store the same stat in a
* couple of different structs. This is partly because we have
* the netdev stats that need to be filled out, which is slightly
* different from the "eth_stats" defined by the chip and used in
- * VF communications. We sort it all out here in a central place.
+ * VF communications. We sort it out here.
**/
-void i40e_update_stats(struct i40e_vsi *vsi)
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
@@ -684,7 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
- int i;
u16 q;
if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -704,21 +763,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
tx_restart = tx_busy = 0;
rx_page = 0;
rx_buf = 0;
+ rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
struct i40e_ring *p;
+ u64 bytes, packets;
+ unsigned int start;
- p = &vsi->rx_rings[q];
- rx_b += p->rx_stats.bytes;
- rx_p += p->rx_stats.packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ /* locate Tx ring */
+ p = ACCESS_ONCE(vsi->tx_rings[q]);
- p = &vsi->tx_rings[q];
- tx_b += p->tx_stats.bytes;
- tx_p += p->tx_stats.packets;
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ tx_b += bytes;
+ tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
- }
+
+ /* Rx queue is part of the same block as Tx queue */
+ p = &p[1];
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp);
+ packets = p->stats.packets;
+ bytes = p->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ rx_b += bytes;
+ rx_p += packets;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
+ }
+ rcu_read_unlock();
vsi->tx_restart = tx_restart;
vsi->tx_busy = tx_busy;
vsi->rx_page_failed = rx_page;
@@ -729,182 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
ns->tx_packets = tx_p;
ns->tx_bytes = tx_b;
- i40e_update_eth_stats(vsi);
/* update netdev stats from eth stats */
- ons->rx_errors = oes->rx_errors;
- ns->rx_errors = es->rx_errors;
+ i40e_update_eth_stats(vsi);
ons->tx_errors = oes->tx_errors;
ns->tx_errors = es->tx_errors;
ons->multicast = oes->rx_multicast;
ns->multicast = es->rx_multicast;
+ ons->rx_dropped = oes->rx_discards;
+ ns->rx_dropped = es->rx_discards;
ons->tx_dropped = oes->tx_discards;
ns->tx_dropped = es->tx_discards;
- /* Get the port data only if this is the main PF VSI */
+ /* pull in a couple PF stats if this is the main vsi */
if (vsi == pf->vsi[pf->lan_vsi]) {
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ ns->rx_crc_errors = pf->stats.crc_errors;
+ ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+ ns->rx_length_errors = pf->stats.rx_length_errors;
+ }
+}
- i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_discards,
- &nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
- i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_multicast,
- &nsd->eth.rx_multicast);
+/**
+ * i40e_update_pf_stats - Update the pf statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+ int i;
- i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_dropped_link_down,
- &nsd->tx_dropped_link_down);
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
- pf->stat_offsets_loaded,
- &osd->crc_errors, &nsd->crc_errors);
- ns->rx_crc_errors = nsd->crc_errors;
+ i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
- i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
- pf->stat_offsets_loaded,
- &osd->illegal_bytes, &nsd->illegal_bytes);
- ns->rx_errors = nsd->crc_errors
- + nsd->illegal_bytes;
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
- i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_local_faults,
- &nsd->mac_local_faults);
- i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_remote_faults,
- &nsd->mac_remote_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
- i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_length_errors,
- &nsd->rx_length_errors);
- ns->rx_length_errors = nsd->rx_length_errors;
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_rx, &nsd->link_xon_rx);
- i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_tx, &nsd->link_xon_tx);
- i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
- i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
- for (i = 0; i < 8; i++) {
- i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_rx[i],
- &nsd->priority_xon_rx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_tx[i],
- &nsd->priority_xon_tx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xoff_tx[i],
- &nsd->priority_xoff_tx[i]);
- i40e_stat_update32(hw,
- I40E_GLPRT_RXON2OFFCNT(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_2_xoff[i],
- &nsd->priority_xon_2_xoff[i]);
- }
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
- i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_64, &nsd->rx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_127, &nsd->rx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_255, &nsd->rx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_511, &nsd->rx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1023, &nsd->rx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1522, &nsd->rx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_big, &nsd->rx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
- i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_64, &nsd->tx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_127, &nsd->tx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_255, &nsd->tx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_511, &nsd->tx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1023, &nsd->tx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1522, &nsd->tx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_big, &nsd->tx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
- i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_undersize, &nsd->rx_undersize);
- i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_fragments, &nsd->rx_fragments);
- i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
- i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_jabber, &nsd->rx_jabber);
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
}
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+
+ /* FDIR stats */
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_match, &nsd->fd_atr_match);
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_sb_match, &nsd->fd_sb_match);
+
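+ /* EEE (Energy Efficient Ethernet) LPI status and event counters */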
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
pf->stat_offsets_loaded = true;
}
/**
+ * i40e_update_stats - Update the various statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * Update the various stats for this VSI and its related entities.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_update_pf_stats(pf);
+
+ i40e_update_vsi_stats(vsi);
+}
+
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1002,7 +1152,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
+ is_vf, is_netdev))
return NULL;
}
}
@@ -1012,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ struct i40e_pf *pf = vsi->back;
+ i40e_status aq_ret;
+
+ /* Only appropriate for the PF main VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (aq_ret)
+ dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+}
+
+/**
* i40e_add_filter - Add a mac/vlan filter to the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1036,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!f)
goto add_filter_out;
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
f->changed = true;
@@ -1144,6 +1318,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return -EADDRNOTAVAIL;
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1156,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
}
- memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
}
/* In order to be sure to not drop any packets, add the new address
@@ -1170,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
i40e_sync_vsi_filters(vsi);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
}
@@ -1197,6 +1375,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
u8 offset;
u16 qmap;
int i;
+ u16 num_tc_qps = 0;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
@@ -1218,6 +1397,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ /* Number of queues per enabled TC */
+ num_tc_qps = vsi->alloc_queue_pairs/numtc;
+ num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1225,30 +1407,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
int pow, num_qps;
- vsi->tc_config.tc_info[i].qoffset = offset;
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (i == 0)
- qcount = pf->rss_size;
- else
- qcount = pf->num_tc_qps;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = min_t(int, pf->rss_size, num_tc_qps);
break;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
default:
- qcount = vsi->alloc_queue_pairs;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = num_tc_qps;
WARN_ON(i != 0);
break;
}
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
/* find the power-of-2 of the number of queue pairs */
- num_qps = vsi->tc_config.tc_info[i].qcount;
+ num_qps = qcount;
pow = 0;
- while (num_qps &&
- ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ while (num_qps && ((1 << pow) < qcount)) {
pow++;
num_qps >>= 1;
}
@@ -1258,7 +1435,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
(offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
- offset += vsi->tc_config.tc_info[i].qcount;
+ offset += qcount;
} else {
/* TC is not enabled so set the offset to
* default queue and allocate one queue
@@ -1428,17 +1605,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to delete list */
- memcpy(del_list[num_del].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY ||
- (vsi->netdev && !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
num_del++;
@@ -1455,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status !=
+ I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
aq_ret,
@@ -1467,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list, num_del, NULL);
num_del = 0;
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
@@ -1496,20 +1670,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to add array */
- memcpy(add_list[num_add].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
add_list[num_add].vlan_tag =
cpu_to_le16(
(u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
add_list[num_add].queue_number = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
- !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -1575,6 +1742,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1593,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
pf->flags &= ~I40E_FLAG_FILTER_SYNC;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1610,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_vsi *vsi = np->vsi;
/* MTU < 68 is an error and causes problems on some kernels */
@@ -1627,6 +1801,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return i40e_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return i40e_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -1708,7 +1903,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1734,13 +1928,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Could not sync filters for vid %d\n", vid);
- return ret;
- }
-
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1761,7 +1948,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
return -ENOMEM;
}
}
+ }
+ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+ if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev)) {
@@ -1777,10 +1967,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
- ret = i40e_sync_vsi_filters(vsi);
}
- return ret;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1796,7 +1989,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1807,12 +1999,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
- return ret;
- }
-
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1855,6 +2041,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
return i40e_sync_vsi_filters(vsi);
}
@@ -1877,11 +2067,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
- /* If the network stack called us with vid = 0, we should
- * indicate to i40e_vsi_add_vlan() that we want to receive
- * any traffic (i.e. with any vlan tag, or untagged)
+ /* If the network stack called us with vid = 0 then
+ * it is asking to receive priority tagged packets with
+ * vlan id 0. Our HW receives them by default when configured
+ * to receive untagged packets, so there is no need to add an
+ * extra filter for vlan 0 tagged packets.
*/
- ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+ if (vid)
+ ret = i40e_vsi_add_vlan(vsi, vid);
if (!ret && (vid < VLAN_N_VID))
set_bit(vid, vsi->active_vlans);
@@ -1894,7 +2087,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
* @netdev: network interface to be adjusted
* @vid: vlan id to be removed
*
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for removing vlan ids
**/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid)
@@ -1945,8 +2138,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -1969,8 +2163,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
+ i40e_vlan_stripping_disable(vsi);
+
vsi->info.pvid = 0;
- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -1988,7 +2183,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+ err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
return err;
}
@@ -2003,9 +2198,12 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->tx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i].desc)
- i40e_free_tx_resources(&vsi->tx_rings[i]);
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ i40e_free_tx_resources(vsi->tx_rings[i]);
}
/**
@@ -2023,7 +2221,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
int i, err = 0;
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
return err;
}
@@ -2037,9 +2235,12 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->rx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i].desc)
- i40e_free_rx_resources(&vsi->rx_rings[i]);
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+ i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
@@ -2058,7 +2259,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
ring->atr_sample_rate = vsi->back->atr_sample_rate;
ring->atr_count = 0;
} else {
@@ -2067,6 +2268,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* initialize XPS */
if (ring->q_vector && ring->netdev &&
+ vsi->tc_config.numtc <= 1 &&
!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -2078,8 +2280,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED));
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED));
+ tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+ /* FDIR VSI tx ring can still use RS bit and writebacks */
+ if (vsi->type != I40E_VSI_FDIR)
+ tx_ctx.head_wb_ena = 1;
+ tx_ctx.head_wb_addr = ring->dma +
+ (ring->count * sizeof(struct i40e_tx_desc));
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2113,9 +2321,12 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
- & I40E_QTX_CTL_PF_INDX_MASK);
+ if (vsi->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw);
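A note on the head-writeback setup earlier in this function: head_wb_addr points one slot past the end of the descriptor ring, inside the same DMA allocation, so the hardware reports Tx progress by writing the head index there instead of writing back each descriptor. As a worked example, a 512-entry ring of 16-byte descriptors puts the head slot at ring->dma + 512 * 16 = ring->dma + 8192. A minimal sketch of that computation (kernel types and the 16-byte i40e_tx_desc assumed):

/* Sketch: the head writeback slot sits immediately after the last
 * descriptor of the ring, assuming sizeof(struct i40e_tx_desc) == 16.
 */
static inline dma_addr_t tx_head_wb_addr(dma_addr_t base, u16 count)
{
        return base + (dma_addr_t)count * sizeof(struct i40e_tx_desc);
}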
@@ -2180,10 +2391,15 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
rx_ctx.tphhead_ena = 1;
- rx_ctx.lrxqthresh = 2;
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
+ /* set the prefena field to 1 because the manual says to */
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2223,8 +2439,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
- err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+ for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+ err = i40e_configure_tx_ring(vsi->tx_rings[i]);
return err;
}
@@ -2274,7 +2490,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
- err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+ err = i40e_configure_rx_ring(vsi->rx_rings[i]);
return err;
}
@@ -2285,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
**/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
u16 qoffset, qcount;
int i, n;
@@ -2298,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2317,6 +2534,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
}
/**
+ * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
+ * @vsi: Pointer to the targeted VSI
+ *
+ * This function replays onto the hardware the SB Flow Director filters
+ * that were saved in the hlist.
+ **/
+static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
+{
+ struct i40e_fdir_filter *filter;
+ struct i40e_pf *pf = vsi->back;
+ struct hlist_node *node;
+
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+
+ hlist_for_each_entry_safe(filter, node,
+ &pf->fdir_filter_list, fdir_node) {
+ i40e_add_del_fdir(vsi, filter, true);
+ }
+}
+
+/**
* i40e_vsi_configure - Set up the VSI for action
* @vsi: the VSI being configured
**/
@@ -2354,8 +2593,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
*/
qp = vsi->base_queue;
vector = vsi->base_vector;
- q_vector = vsi->q_vectors;
- for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ q_vector = vsi->q_vectors[i];
q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2414,7 +2653,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
- I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2422,8 +2661,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
/* OTHER_ITR_IDX = 0 */
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2435,7 +2674,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
**/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
- struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2453,7 +2692,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the q int */
+ /* Associate the queue pair to the vector and enable the queue int */
val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
(I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2469,10 +2708,23 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
}
/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
@@ -2500,7 +2752,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- i40e_flush(hw);
+ /* skip the flush */
}
/**
@@ -2512,7 +2764,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
struct i40e_q_vector *q_vector = data;
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
return IRQ_HANDLED;
napi_schedule(&q_vector->napi);
@@ -2521,23 +2773,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
}
/**
- * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
-{
- struct i40e_q_vector *q_vector = data;
-
- if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
- return IRQ_HANDLED;
-
- pr_info("fdir ring cleaning needed\n");
-
- return IRQ_HANDLED;
-}
-
-/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -2554,16 +2789,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int vector, err;
for (vector = 0; vector < q_vectors; vector++) {
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
- if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring[0]) {
+ } else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring[0]) {
+ } else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2586,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
&q_vector->affinity_mask);
}
+ vsi->irqs_ready = true;
return 0;
free_queue_irqs:
@@ -2611,8 +2847,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
- wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
}
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2885,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
i40e_irq_dynamic_enable_icr0(pf);
}
+ i40e_flush(&pf->hw);
return 0;
}
@@ -2676,20 +2913,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
{
struct i40e_pf *pf = (struct i40e_pf *)data;
struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
u32 icr0, icr0_remaining;
u32 val, ena_mask;
icr0 = rd32(hw, I40E_PFINT_ICR0);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
+ goto enable_intr;
- val = rd32(hw, I40E_PFINT_DYN_CTL0);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, val);
-
- ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2702,10 +2940,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state))
- napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
}
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2730,12 +2967,28 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val & I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER) {
pf->corer_count++;
- else if (val & I40E_RESET_GLOBR)
+ } else if (val == I40E_RESET_GLOBR) {
pf->globr_count++;
- else if (val & I40E_RESET_EMPR)
+ } else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
+ set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ }
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+ u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ i40e_ptp_tx_hwtstamp(pf);
+ }
}
/* If a critical error is pending we have no choice but to reset the
@@ -2746,68 +2999,137 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0_remaining) {
dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
icr0_remaining);
- if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- dev_info(&pf->pdev->dev, "HMC error interrupt\n");
- } else {
- dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
- i40e_service_event_schedule(pf);
- }
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
}
+ ret = IRQ_HANDLED;
+enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
}
- return IRQ_HANDLED;
+ return ret;
}
/**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @r_idx: rx queue index
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
**/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
+ struct i40e_vsi *vsi = tx_ring->vsi;
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
- rx_ring->q_vector = q_vector;
- q_vector->rx.ring[q_vector->rx.count] = rx_ring;
- q_vector->rx.count++;
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- q_vector->vsi = vsi;
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* move to the next desc and buffer to clean */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ tx_ring->q_vector->v_idx + vsi->base_vector);
+ }
+ return budget > 0;
+}
+
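The i -= tx_ring->count at the top of i40e_clean_fdir_tx_irq is the usual Intel-driver biasing trick: the running index stays in [-count, 0), so the wrap check collapses to unlikely(!i) instead of a compare against the ring size. A standalone sketch of the same idiom, with hypothetical names:

/* Sketch: walk up to 'budget' ring entries starting at next_to_clean,
 * keeping the index biased by -count so wrap detection is just !i.
 */
static unsigned int walk_ring_biased(unsigned int next_to_clean,
                                     unsigned int count, int budget)
{
        int i = (int)next_to_clean - (int)count;        /* always negative */

        while (budget--) {
                /* the entry being processed lives at index (i + count) */
                i++;
                if (!i)                 /* wrapped past the last entry */
                        i -= count;     /* re-bias to the ring start */
        }
        return i + count;               /* new next_to_clean */
}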
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+ struct i40e_vsi *vsi;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+ vsi = q_vector->tx.ring->vsi;
+ i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
+ return IRQ_HANDLED;
}
/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
* @vsi: the VSI being configured
* @v_idx: vector index
- * @t_idx: tx queue index
+ * @qp_idx: queue pair index
**/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
- struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
- struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+ struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
tx_ring->q_vector = q_vector;
- q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ tx_ring->next = q_vector->tx.ring;
+ q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
- q_vector->num_ringpairs++;
- q_vector->vsi = vsi;
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
}
/**
@@ -2823,7 +3145,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
int qp_remaining = vsi->num_queue_pairs;
int q_vectors = vsi->num_q_vectors;
- int qp_per_vector;
+ int num_ringpairs;
int v_start = 0;
int qp_idx = 0;
@@ -2831,11 +3153,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
* group them so there are multiple queues per vector.
*/
for (; v_start < q_vectors && qp_remaining; v_start++) {
- qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
- for (; qp_per_vector;
- qp_per_vector--, qp_idx++, qp_remaining--) {
- map_vector_to_rxq(vsi, v_start, qp_idx);
- map_vector_to_txq(vsi, v_start, qp_idx);
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+ num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+ q_vector->num_ringpairs = num_ringpairs;
+
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+
+ while (num_ringpairs--) {
+ map_vector_to_qp(vsi, v_start, qp_idx);
+ qp_idx++;
+ qp_remaining--;
}
}
}
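Worked example of the distribution above: 10 queue pairs over 4 vectors come out as 3, 3, 2, 2, because DIV_ROUND_UP is recomputed against the vectors still remaining, pushing the remainder onto the earliest vectors. A standalone sketch:

#include <stdio.h>

/* Sketch: show how qp_remaining queue pairs spread over q_vectors
 * interrupt vectors, mirroring the loop above.  10 pairs across
 * 4 vectors prints 3 3 2 2.
 */
int main(void)
{
        int qp_remaining = 10, q_vectors = 4;
        int v;

        for (v = 0; v < q_vectors && qp_remaining; v++) {
                /* DIV_ROUND_UP(qp_remaining, q_vectors - v) */
                int n = (qp_remaining + (q_vectors - v) - 1) / (q_vectors - v);

                printf("vector %d: %d queue pairs\n", v, n);
                qp_remaining -= n;
        }
        return 0;
}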
@@ -2887,7 +3219,7 @@ static void i40e_netpoll(struct net_device *netdev)
pf->flags |= I40E_FLAG_IN_NETPOLL;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
for (i = 0; i < vsi->num_q_vectors; i++)
- i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ i40e_msix_clean_rings(0, vsi->q_vectors[i]);
} else {
i40e_intr(pf->pdev->irq, netdev);
}
@@ -2909,48 +3241,38 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
- tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
- ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
- if (enable) {
- /* is STAT set ? */
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already enabled\n", i);
- continue;
- }
- } else {
- /* is !STAT set ? */
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already disabled\n", i);
- continue;
- }
+ /* warn the TX unit of coming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ udelay(10);
+
+ for (j = 0; j < 50; j++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
}
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
- if (enable)
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
- else
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ } else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (enable) {
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -2961,6 +3283,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
+ if (hw->revision_id == 0)
+ mdelay(50);
+
return 0;
}
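Both queue-control paths now share one shape: wait until the QENA_REQ and QENA_STAT bits agree, flip only REQ (STAT is hardware-owned), then poll STAT until it follows. A condensed sketch of that handshake, not the driver's literal code (rd32/wr32/udelay and the Tx masks as used above):

/* Sketch of the QENA handshake: REQ is software's request, STAT is
 * the hardware acknowledgement.  Returns 0 once STAT matches.
 */
static int qena_set(struct i40e_hw *hw, u32 reg, bool enable)
{
        u32 val = rd32(hw, reg);
        int j;

        if (enable)
                val |= I40E_QTX_ENA_QENA_REQ_MASK;
        else
                val &= ~I40E_QTX_ENA_QENA_REQ_MASK;
        wr32(hw, reg, val);

        for (j = 0; j < 10; j++) {
                val = rd32(hw, reg);
                if (enable == !!(val & I40E_QTX_ENA_QENA_STAT_MASK))
                        return 0;
                udelay(10);
        }
        return -ETIMEDOUT;
}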
@@ -2978,43 +3303,31 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
- ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
-
- if (enable) {
- /* is STAT set ? */
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- } else {
- /* is !STAT set ? */
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
}
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+
/* turn on/off the queue */
if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
else
- rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK);
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) {
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3033,9 +3346,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
* @vsi: the VSI being configured
* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
- int ret;
+ int ret = 0;
/* do rx first for enable and last for disable */
if (request) {
@@ -3044,10 +3357,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
return ret;
ret = i40e_vsi_control_tx(vsi, request);
} else {
- ret = i40e_vsi_control_tx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_rx(vsi, request);
+ /* Ignore return value; we need to shut down whatever we can */
+ i40e_vsi_control_tx(vsi, request);
+ i40e_vsi_control_rx(vsi, request);
}
return ret;
@@ -3069,18 +3381,23 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
if (!vsi->q_vectors)
return;
+ if (!vsi->irqs_ready)
+ return;
+
+ vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i].num_ringpairs == 0)
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
free_irq(pf->msix_entries[vector].vector,
- &vsi->q_vectors[i]);
+ vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
*
@@ -3164,6 +3481,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
}
/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct i40e_ring *ring;
+
+ if (!q_vector)
+ return;
+
+ /* disassociate q_vector from rings */
+ i40e_for_each_ring(ring, q_vector->tx)
+ ring->q_vector = NULL;
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ ring->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+
+ vsi->q_vectors[v_idx] = NULL;
+
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
* @vsi: the VSI being un-configured
*
@@ -3174,24 +3524,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
- struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
- int r_idx;
-
- if (!q_vector)
- continue;
-
- /* disassociate q_vector from rings */
- for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
- q_vector->tx.ring[r_idx]->q_vector = NULL;
- for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
- q_vector->rx.ring[r_idx]->q_vector = NULL;
-
- /* only VSI w/ an associated netdev is set up w/ NAPI */
- if (vsi->netdev)
- netif_napi_del(&q_vector->napi);
- }
- kfree(vsi->q_vectors);
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ i40e_free_q_vector(vsi, v_idx);
}
/**
@@ -3223,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
i40e_vsi_free_q_vectors(pf->vsi[i]);
i40e_reset_interrupt_capability(pf);
@@ -3241,7 +3575,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx].napi);
+ napi_enable(&vsi->q_vectors[q_idx]->napi);
}
/**
@@ -3256,7 +3590,20 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
return;
for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx].napi);
+ napi_disable(&vsi->q_vectors[q_idx]->napi);
+}
+
+/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
}
/**
@@ -3272,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
- set_bit(__I40E_DOWN, &vsi->state);
- i40e_down(vsi);
+ i40e_vsi_close(vsi);
}
}
@@ -3290,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
- i40e_up(vsi); /* this clears the DOWN bit */
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
/**
@@ -3301,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_quiesce_vsi(pf->vsi[v]);
}
@@ -3315,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_unquiesce_vsi(pf->vsi[v]);
}
@@ -3468,7 +3814,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
/* Get the VSI level BW configuration per TC */
aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
+ NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
@@ -3522,8 +3868,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
- "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
+ "AQ command Config VSI BW allocation per TC failed = %d\n",
+ vsi->back->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -3679,6 +4025,206 @@ out:
}
/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+ struct i40e_pf *pf = veb->pf;
+ int ret = 0;
+ int i;
+
+ /* No TCs or already enabled TCs just return */
+ if (!enabled_tc || veb->enabled_tc == enabled_tc)
+ return ret;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ /* bw_data.absolute_credits is not set (relative) */
+
+ /* Enable ETS TCs with equal BW Share for now */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_data.tc_bw_share_credits[i] = 1;
+ }
+
+ ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "veb bw config failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the BW information */
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed getting veb bw config, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+out:
+ return ret;
+}
+
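Worked example of the bitmap loop above: enabled_tc = 0x5 (binary 101) selects TC0 and TC2, so tc_bw_share_credits[0] and [2] are each set to 1 and those two classes split the bandwidth evenly; TC1's entry stays 0 and gets no ETS share.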
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has already quiesced all the VSIs before calling
+ * this function
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+ u8 tc_map = 0;
+ int ret;
+ u8 v;
+
+ /* Enable the TCs available on PF to all VEBs */
+ tc_map = i40e_pf_get_tc_map(pf);
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+ ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VEB seid=%d\n",
+ pf->veb[v]->seid);
+ /* Will try to configure as many components as possible */
+ }
+ }
+
+ /* Update each VSI */
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (!pf->vsi[v])
+ continue;
+
+ /* - Enable all TCs for the LAN VSI
+ * - For all others keep them at TC0 for now
+ */
+ if (v == pf->lan_vsi)
+ tc_map = i40e_pf_get_tc_map(pf);
+ else
+ tc_map = i40e_pf_get_default_tc(pf);
+
+ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VSI seid=%d\n",
+ pf->vsi[v]->seid);
+ /* Will try to configure as many components as possible */
+ } else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
+ if (pf->vsi[v]->netdev)
+ i40e_dcbnl_set_all(pf->vsi[v]);
+ }
+ }
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ if (pf->hw.func_caps.npar_enable)
+ goto out;
+
+ /* Get the initial DCB configuration */
+ err = i40e_init_dcb(hw);
+ if (!err) {
+ /* Device/Function is not DCBX capable */
+ if ((!hw->func_caps.dcb) ||
+ (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+ dev_info(&pf->pdev->dev,
+ "DCBX offload is not supported or is disabled for this PF.\n");
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ goto out;
+
+ } else {
+ /* When status is not DISABLED then DCBX is in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ }
+ } else {
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+out:
+ return err;
+}
+#endif /* CONFIG_I40E_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ * @isup: true if the link is up, false otherwise
+ **/
+static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+ char speed[SPEED_SIZE] = "Unknown";
+ char fc[FC_SIZE] = "RX/TX";
+
+ if (!isup) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (vsi->back->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ strncpy(speed, "40 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ strncpy(speed, "10 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ strncpy(speed, "1000 Mbps", SPEED_SIZE);
+ break;
+ default:
+ break;
+ }
+
+ switch (vsi->back->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ strncpy(fc, "RX/TX", FC_SIZE);
+ break;
+ case I40E_FC_TX_PAUSE:
+ strncpy(fc, "TX", FC_SIZE);
+ break;
+ case I40E_FC_RX_PAUSE:
+ strncpy(fc, "RX", FC_SIZE);
+ break;
+ default:
+ strncpy(fc, "None", FC_SIZE);
+ break;
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
+
+/**
* i40e_up_complete - Finish the last steps of bringing up a connection
* @vsi: the VSI being configured
**/
@@ -3703,9 +4249,16 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
+ i40e_print_link_message(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ } else if (vsi->netdev) {
+ i40e_print_link_message(vsi, false);
}
+
+ /* replay FDIR SB filters */
+ if (vsi->type == I40E_VSI_FDIR)
+ i40e_fdir_filter_restore(vsi);
i40e_service_event_schedule(pf);
return 0;
@@ -3772,8 +4325,8 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
- i40e_clean_tx_ring(&vsi->tx_rings[i]);
- i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ i40e_clean_tx_ring(vsi->tx_rings[i]);
+ i40e_clean_rx_ring(vsi->rx_rings[i]);
}
}
@@ -3852,15 +4405,48 @@ static int i40e_open(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- char int_name[IFNAMSIZ];
int err;
- /* disallow open during test */
- if (test_bit(__I40E_TESTING, &pf->state))
+ /* disallow open during test or if eeprom is broken */
+ if (test_bit(__I40E_TESTING, &pf->state) ||
+ test_bit(__I40E_BAD_EEPROM, &pf->state))
return -EBUSY;
netif_carrier_off(netdev);
+ err = i40e_vsi_open(vsi);
+ if (err)
+ return err;
+
+ /* configure global TSO hardware offload settings */
+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+ TCP_FLAG_FIN) >> 16);
+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+ TCP_FLAG_FIN |
+ TCP_FLAG_CWR) >> 16);
+ wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_open - bring up a VSI and allocate its resources
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_vsi_open(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ char int_name[IFNAMSIZ];
+ int err;
+
/* allocate descriptors */
err = i40e_vsi_setup_tx_resources(vsi);
if (err)
@@ -3873,28 +4459,42 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), netdev->name);
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ } else {
+ err = -EINVAL;
goto err_setup_rx;
+ }
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
- if (err)
- netdev_info(netdev,
- "couldn't set broadcast err %d aq_err %d\n",
- err, pf->hw.aq.asq_last_status);
- }
-
return 0;
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -3907,6 +4507,26 @@ err_setup_tx:
}
/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to pf
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *filter;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(filter, node2,
+ &pf->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ pf->fdir_pf_active_filters = 0;
+}
+
+/**
* i40e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -3921,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state))
- return 0;
-
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
return 0;
}
@@ -3948,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
+
/* do the biggest reset indicated */
if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
@@ -3959,7 +4575,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* for the warning interrupt will deal with the shutdown
* and recovery of the switch setup.
*/
- dev_info(&pf->pdev->dev, "GlobalR requested\n");
+ dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -3970,12 +4586,30 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
*
* Same as Global Reset, except does *not* include the MAC/PHY
*/
- dev_info(&pf->pdev->dev, "CoreR requested\n");
+ dev_dbg(&pf->pdev->dev, "CoreR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
val |= I40E_GLGEN_RTRIG_CORER_MASK;
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
+ } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+ /* Request a Firmware Reset
+ *
+ * Same as Global reset, plus restarting the
+ * embedded firmware engine.
+ */
+ /* enable EMP Reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+ val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+ /* force the reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -3986,7 +4620,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
* the switch, since we need to do all the recovery as
* for the Core Reset.
*/
- dev_info(&pf->pdev->dev, "PFR requested\n");
+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf);
} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -3995,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* Find the VSI(s) that requested a re-init */
dev_info(&pf->pdev->dev,
"VSI reinit requested\n");
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4013,6 +4647,154 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg,
+ &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prioritytable,
+ &old_cfg->etscfg.prioritytable,
+ sizeof(new_cfg->etscfg.prioritytable))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc,
+ &old_cfg->pfc,
+ sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app,
+ &old_cfg->app,
+ sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lldp_get_mib *mib =
+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ struct i40e_dcbx_config tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ int ret = 0;
+ u8 type;
+
+ /* Not DCB capable or capability disabled */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return ret;
+
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+ return ret;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ goto exit;
+ }
+
+ /* Convert/store the DCBX data from LLDPDU temporarily */
+ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
+ ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ if (ret) {
+ /* Error in LLDPDU parsing, return */
+ dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ goto exit;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ goto exit;
+ }
+
+ need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+
+ /* Overwrite the new configuration */
+ *dcbx_cfg = tmp_dcbx_cfg;
+
+ if (!need_reconfig)
+ goto exit;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+ /* Reconfiguration needed, quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Changes in configuration, update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+
+ i40e_pf_unquiesce_all_vsi(pf);
+exit:
+ return ret;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
@@ -4032,8 +4814,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
struct i40e_vf *vf;
u16 vf_id;
- dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
- __func__, queue, qtx_ctl);
+ dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ queue, qtx_ctl);
/* Queue belongs to VF, find the VF and issue VF reset */
if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4058,11 +4840,58 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
/* flush memory to make sure state is correct before next watchdog */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
/**
+ * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * @pf: board private structure
+ **/
+int i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+ int val, fcnt_prog;
+ val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+ fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+ ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ return fcnt_prog;
+}
+
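Worked example of the decode above, with hypothetical register contents: if the GUARANT_CNT field reads 64 and the BEST_CNT field, after its shift, reads 32, then fcnt_prog comes back as 96; the guaranteed and best-effort filter pools are simply summed.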
+/**
+ * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+ u32 fcnt_prog, fcnt_avail;
+
+ /* Check if FD SB or ATR was auto-disabled and if there is enough room
+ * to re-enable
+ */
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ return;
+ fcnt_prog = i40e_get_current_fd_count(pf);
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
+ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
+ if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ }
+ }
+ /* Wait for some more space to be available to turn on ATR */
+ if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ }
+ }
+}
+
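Worked example of the two thresholds, with hypothetical numbers: if fcnt_avail is 8192 and I40E_FDIR_BUFFER_HEAD_ROOM is 32, sideband re-enables once fewer than 8160 filters are programmed, while ATR waits for fewer than 8128; the doubled headroom keeps ATR from flapping right at the limit.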
+/**
* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
* @pf: board private structure
**/
@@ -4071,11 +4900,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
return;
- pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
-
/* if interface is down do nothing */
if (test_bit(__I40E_DOWN, &pf->state))
return;
+ i40e_fdir_check_and_reenable(pf);
+
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
}
/**
@@ -4134,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
i40e_veb_link_event(pf->veb[i], link_up);
/* ... now the local VSIs */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
i40e_vsi_link_event(pf->vsi[i], link_up);
}
@@ -4152,9 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
-
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+ i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4166,6 +4997,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
+
+ if (pf->flags & I40E_FLAG_PTP)
+ i40e_ptp_set_increment(pf);
}
/**
@@ -4189,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
* for each q_vector
* force an interrupt
*/
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
int armed = 0;
@@ -4199,9 +5033,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- set_check_for_tx_hang(&vsi->tx_rings[i]);
+ set_check_for_tx_hang(vsi->tx_rings[i]);
if (test_bit(__I40E_HANG_CHECK_ARMED,
- &vsi->tx_rings[i].state))
+ &vsi->tx_rings[i]->state))
armed++;
}
@@ -4239,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
@@ -4247,6 +5081,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
i40e_update_veb_stats(pf->veb[i]);
+
+ i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
/**
@@ -4257,6 +5093,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4279,7 +5116,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4287,6 +5124,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4339,17 +5179,54 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u16 pending, i = 0;
i40e_status ret;
u16 opcode;
+ u32 oldval;
u32 val;
if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
return;
+ /* check for error indications */
+ val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ oldval = val;
+ if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+ val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ oldval = val;
+ if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
event.msg_size = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf)
return;
do {
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4374,16 +5251,24 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
event.msg_size);
break;
case i40e_aqc_opc_lldp_update_mib:
- dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+ dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+ rtnl_lock();
+ ret = i40e_handle_lldp_event(pf, &event);
+ rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
- dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event %d received\n",
- event.desc.opcode);
+ "ARQ Error: Unknown event 0x%04x received\n",
+ opcode);
break;
}
} while (pending && (i++ < pf->adminq_work_limit));
@@ -4399,6 +5284,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
}
/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+ int err;
+
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ /* retry in case of garbage read */
+ err = i40e_diag_eeprom_test(&pf->hw);
+ if (err) {
+ dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+ err);
+ set_bit(__I40E_BAD_EEPROM, &pf->state);
+ }
+ }
+
+ if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+ dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+ clear_bit(__I40E_BAD_EEPROM, &pf->state);
+ }
+}
+
+/**
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
* @veb: pointer to the VEB instance
*
@@ -4415,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
int ret;
/* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
if (pf->vsi[v] &&
pf->vsi[v]->veb_idx == veb->idx &&
pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -4445,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
goto end_reconstitute;
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
continue;
@@ -4513,6 +5423,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
+ if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
+ (pf->hw.aq.fw_maj_ver < 2)) {
+ pf->hw.func_caps.num_msix_vectors++;
+ pf->hw.func_caps.num_msix_vectors_vf++;
+ }
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4524,56 +5440,53 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+
return 0;
}
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
/**
- * i40e_fdir_setup - initialize the Flow Director resources
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
* @pf: board private structure
**/
-static void i40e_fdir_setup(struct i40e_pf *pf)
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- bool new_vsi = false;
- int err, i;
+ int i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-
- /* find existing or make new FDIR VSI */
+ /* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
+ break;
+ }
+ }
+
+ /* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
return;
}
- new_vsi = true;
- }
- WARN_ON(vsi->base_queue != I40E_FDIR_RING);
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
-
- err = i40e_vsi_setup_tx_resources(vsi);
- if (!err)
- err = i40e_vsi_setup_rx_resources(vsi);
- if (!err)
- err = i40e_vsi_configure(vsi);
- if (!err && new_vsi) {
- char int_name[IFNAMSIZ + 9];
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
- err = i40e_vsi_request_irq(vsi, int_name);
}
- if (!err)
- err = i40e_up_complete(vsi);
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
@@ -4584,7 +5497,8 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
{
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ i40e_fdir_filter_exit(pf);
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
break;
@@ -4593,49 +5507,86 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for pf Reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ i40e_status ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- return;
-
- dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+ return 0;
- i40e_vc_notify_reset(pf);
+ dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
pf->vsi[v]->seid = 0;
}
i40e_shutdown_adminq(&pf->hw);
+ /* call shutdown HMC */
+ if (hw->hmc.hmc_obj) {
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_warn(&pf->pdev->dev,
+ "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
+ }
+ return ret;
+}
+
+/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
*/
ret = i40e_pf_reset(hw);
- if (ret)
+ if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ goto end_core_reset;
+ }
pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state))
goto end_core_reset;
- dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+ dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
@@ -4644,6 +5595,13 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
+ /* re-verify the eeprom if we just had an EMP reset */
+ if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
+ clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ i40e_verify_eeprom(pf);
+ }
+
+ i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
if (ret) {
dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -4651,13 +5609,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
- /* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- goto end_core_reset;
- }
-
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4671,8 +5622,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
+#ifdef CONFIG_I40E_DCB
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
+ goto end_core_reset;
+ }
+#endif /* CONFIG_I40E_DCB */
+
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4684,7 +5643,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
* try to recover minimal use by getting the basic PF VSI working.
*/
if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
- dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+ dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
/* find the one VEB connected to the MAC, and find orphans */
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
@@ -4737,20 +5696,35 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
- /* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
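+	/* reset each allocated VF so it reinitializes against the rebuilt PF */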
+ if (pf->num_alloc_vfs) {
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_reset_vf(&pf->vf[v], true);
+ }
- dev_info(&pf->pdev->dev, "PF reset done\n");
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
/**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ i40e_status ret;
+
+ ret = i40e_prep_for_reset(pf);
+ if (!ret)
+ i40e_reset_and_rebuild(pf, false);
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the pf structure
*
@@ -4777,7 +5751,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
>> I40E_GL_MDET_TX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
- "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+ "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -4791,7 +5765,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
>> I40E_GL_MDET_RX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
- "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+ "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
mdd_detected = true;
@@ -4831,6 +5805,50 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 filter_index;
+ __be16 port;
+ int i;
+
+ if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ return;
+
+ pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->pending_vxlan_bitmap & (1 << i)) {
+ pf->pending_vxlan_bitmap &= ~(1 << i);
+ port = pf->vxlan_ports[i];
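+			/* a non-zero port means add the tunnel; a zero port
+			 * marks the slot for deletion
+			 */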
+ ret = port ?
+ i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL)
+ : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+ port ? "adding" : "deleting",
+ ntohs(port), port ? i : i);
+
+ pf->vxlan_ports[i] = 0;
+ } else {
+ dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+ port ? "Added" : "Deleted",
+ ntohs(port), port ? i : filter_index);
+ }
+ }
+ }
+}
+
+#endif
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -4849,6 +5867,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+ i40e_sync_vxlan_filters_subtask(pf);
+#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -4926,6 +5947,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: pointer to the VSI
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ int size;
+ int ret = 0;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
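+	/* rx_rings points into the second half of the same allocation, so
+	 * only tx_rings needs to be freed when the arrays are released
+	 */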
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
+
+/**
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
* @pf: board private structure
* @type: type of VSI
@@ -4950,26 +6007,26 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
* find next empty vsi slot, looping back around if necessary
*/
i = pf->next_vsi;
- while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ while (i < pf->num_alloc_vsi && pf->vsi[i])
i++;
- if (i >= pf->hw.func_caps.num_vsis) {
+ if (i >= pf->num_alloc_vsi) {
i = 0;
while (i < pf->next_vsi && pf->vsi[i])
i++;
}
- if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
- goto err_alloc_vsi; /* out of VSI slots! */
+ goto unlock_pf; /* out of VSI slots! */
}
pf->next_vsi = ++i;
vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
if (!vsi) {
ret = -ENOMEM;
- goto err_alloc_vsi;
+ goto unlock_pf;
}
vsi->type = type;
vsi->back = pf;
@@ -4981,20 +6038,52 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
+ vsi->irqs_ready = false;
- i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_set_num_rings_in_vsi(vsi);
+ if (ret)
+ goto err_rings;
+
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
+ goto err_rings;
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
pf->vsi[vsi_idx] = vsi;
ret = vsi_idx;
-err_alloc_vsi:
+ goto unlock_pf;
+
+err_rings:
+ pf->next_vsi = i - 1;
+ kfree(vsi);
+unlock_pf:
mutex_unlock(&pf->switch_mutex);
return ret;
}
/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -5030,6 +6119,8 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ i40e_vsi_free_arrays(vsi, true);
+
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
@@ -5043,34 +6134,38 @@ free_vsi:
}
/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
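+			/* Tx and Rx rings are allocated as a single block in
+			 * i40e_alloc_rings(), so freeing the Tx ring frees both
+			 */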
+ kfree_rcu(vsi->tx_rings[i], rcu);
+ vsi->tx_rings[i] = NULL;
+ vsi->rx_rings[i] = NULL;
+ }
+ }
+}
+
+/**
* i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being configured
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_pf *pf = vsi->back;
- int ret = 0;
int i;
- vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->rx_rings) {
- ret = -ENOMEM;
- goto err_alloc_rings;
- }
-
- vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
- sizeof(struct i40e_ring), GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
- kfree(vsi->rx_rings);
- goto err_alloc_rings;
- }
-
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *rx_ring = &vsi->rx_rings[i];
- struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ /* allocate space for both Tx and Rx in one shot */
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
tx_ring->queue_index = i;
tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +6176,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->count = vsi->num_desc;
tx_ring->size = 0;
tx_ring->dcb_tc = 0;
+ vsi->tx_rings[i] = tx_ring;
+ rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->reg_idx = vsi->base_queue + i;
rx_ring->ring_active = false;
@@ -5095,24 +6192,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
- }
-
-err_alloc_rings:
- return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
- if (vsi) {
- kfree(vsi->rx_rings);
- kfree(vsi->tx_rings);
+ vsi->rx_rings[i] = rx_ring;
}
return 0;
+
+err_out:
+ i40e_vsi_clear_rings(vsi);
+ return -ENOMEM;
}
/**
@@ -5124,34 +6211,11 @@ static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
- int err = 0;
-
- pf->num_msix_entries = 0;
- while (vectors >= I40E_MIN_MSIX) {
- err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
- if (err == 0) {
- /* good to go */
- pf->num_msix_entries = vectors;
- break;
- } else if (err < 0) {
- /* total failure */
- dev_info(&pf->pdev->dev,
- "MSI-X vector reservation failed: %d\n", err);
- vectors = 0;
- break;
- } else {
- /* err > 0 is the hint for retry */
- dev_info(&pf->pdev->dev,
- "MSI-X vectors wanted %d, retrying with %d\n",
- vectors, err);
- vectors = err;
- }
- }
-
- if (vectors > 0 && vectors < I40E_MIN_MSIX) {
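+	/* pci_enable_msix_range() returns the number of vectors granted,
+	 * anywhere from I40E_MIN_MSIX up to the requested count, or a
+	 * negative errno on failure
+	 */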
+ vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ I40E_MIN_MSIX, vectors);
+ if (vectors < 0) {
dev_info(&pf->pdev->dev,
- "Couldn't get enough vectors, only %d available\n",
- vectors);
+ "MSI-X vector reservation failed: %d\n", vectors);
vectors = 0;
}
@@ -5179,19 +6243,22 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+ * We don't need as many as max_rss_size vectors.
+	 *     Use rss_size instead in the calculation since that
+	 *     is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
v_budget++;
/* Scale down if necessary, and the rings will share vectors */
@@ -5205,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
vec = i40e_reserve_msix_vectors(pf, v_budget);
+
+ if (vec != v_budget) {
+ /* If we have limited resources, we will start with no vectors
+ * for the special features and then allocate vectors to some
+		 * of these features based on the policy, and at the end disable
+ * the features that did not get any vectors.
+ */
+ pf->num_vmdq_msix = 0;
+ }
+
if (vec < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -5213,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
- pf->num_vmdq_msix = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
} else if (vec != v_budget) {
+ /* reserve the misc vector */
+ vec--;
+
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
- vec--; /* reserve the misc vector */
+ pf->num_vmdq_vsis = 1;
/* partition out the remaining vectors */
switch (vec) {
case 2:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 1;
break;
case 3:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 2;
break;
default:
@@ -5245,20 +6320,58 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ (pf->num_vmdq_msix == 0)) {
+ dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ }
return err;
}
/**
- * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+ struct i40e_q_vector *q_vector;
+
+ /* allocate q_vector */
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ q_vector->vsi = vsi;
+ q_vector->v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &q_vector->napi,
+ i40e_napi_poll, NAPI_POLL_WEIGHT);
+
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+ /* tie q_vector and vsi together */
+ vsi->q_vectors[v_idx] = q_vector;
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
-static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors;
+ int err;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +6381,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- vsi->q_vectors = kcalloc(num_q_vectors,
- sizeof(struct i40e_q_vector),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- return -ENOMEM;
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- vsi->q_vectors[v_idx].vsi = vsi;
- vsi->q_vectors[v_idx].v_idx = v_idx;
- cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
- if (vsi->netdev)
- netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
- i40e_napi_poll, vsi->work_limit);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ if (err)
+ goto err_out;
}
return 0;
+
+err_out:
+ while (v_idx--)
+ i40e_free_q_vector(vsi, v_idx);
+
+ return err;
}
/**
@@ -5297,13 +6407,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
@@ -5312,14 +6422,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
- dev_info(&pf->pdev->dev,
- "MSI init failed (%d), trying legacy.\n", err);
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
}
+ if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
+
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
}
@@ -5345,7 +6458,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_intr, 0, pf->misc_int_name, pf);
if (err) {
dev_info(&pf->pdev->dev,
- "request_irq for msix_misc failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ pf->misc_int_name, err);
return -EFAULT;
}
}
@@ -5369,15 +6483,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 lut = 0;
- int i, j;
- u64 hena;
/* Set of random keys generated using kernel random number generator */
static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5386,16 +6500,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
@@ -5424,6 +6529,33 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+
+ if (queue_count != pf->rss_size) {
+ i40e_prep_for_reset(pf);
+
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -5438,6 +6570,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -5449,39 +6582,50 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_PS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Set default ITR */
+ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
+ /* Depending on PF configurations, it is possible that the RSS
+ * maximum might end up larger than the available queues
+ */
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
} else {
pf->rss_size = 1;
}
- if (pf->hw.func_caps.dcb)
- pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
- else
- pf->num_tc_qps = 0;
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
- if (pf->hw.func_caps.fd) {
- /* FW/NVM is not yet fixed in this regard */
- if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
- (pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
- pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ /* Setup a counter for fd_atr per pf */
+ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* Setup a counter for fd_sb per pf */
+ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
+ } else {
dev_info(&pf->pdev->dev,
- "Flow Director Side Band mode Enabled\n");
- pf->fdir_pf_filter_count =
- pf->hw.func_caps.fd_filters_guaranteed;
+ "Flow Director Sideband mode Disabled in MFP mode\n");
}
- } else {
- pf->fdir_pf_filter_count = 0;
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ pf->hw.fdir_shared_filter_count =
+ pf->hw.func_caps.fd_filters_best_effort;
}
if (pf->hw.func_caps.vmdq) {
@@ -5490,12 +6634,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
- /* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- }
-
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5539,6 +6677,39 @@ sw_init_done:
}
/**
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure to initialize
+ * @features: the feature set that the stack is suggesting
+ *
+ * Returns a bool to indicate if a reset needs to happen
+ **/
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+{
+ bool need_reset = false;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (features & NETIF_F_NTUPLE) {
+ /* Enable filters and mark for reset */
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+ need_reset = true;
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ } else {
+ /* turn off filters, mark for reset and clear SW filter list */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ need_reset = true;
+ i40e_fdir_filter_exit(pf);
+ }
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ /* if ATR was disabled it can be re-enabled. */
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ }
+ return need_reset;
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -5548,15 +6719,210 @@ static int i40e_set_features(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ bool need_reset;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
i40e_vlan_stripping_enable(vsi);
else
i40e_vlan_stripping_disable(vsi);
+ need_reset = i40e_set_ntuple(pf, features);
+
+ if (need_reset)
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
return 0;
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up a possibly offloaded UDP port for Rx
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+ u8 i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return i;
+}
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[next_idx] = port;
+ pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->vxlan_ports[idx] = 0;
+
+ pf->pending_vxlan_bitmap |= (1 << idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ } else {
+ netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ ntohs(port));
+ }
+}
+
+#endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+	/* Hardware does not support aging addresses, so if an
+	 * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -5566,6 +6932,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
+ .ndo_do_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -5576,8 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
- .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+ .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
+ .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck,
+#ifdef CONFIG_I40E_VXLAN
+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
+#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = i40e_ndo_fdb_del,
+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
+#endif
+#endif
};
/**
@@ -5588,6 +6968,7 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -5604,10 +6985,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO |
- NETIF_F_SG;
+ NETIF_F_TSO;
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
@@ -5619,17 +6999,29 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
NETIF_F_RXHASH |
0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ netdev->features |= NETIF_F_NTUPLE;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
- memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ ether_addr_copy(mac_addr, hw->mac.perm_addr);
+		/* The following two steps are necessary to prevent reception
+		 * of tagged packets during normal operation until a specific
+		 * VLAN tag filter has been set - by default the NVM loads a
+		 * MAC-VLAN filter that will accept any tagged packet.
+		 */
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -5637,9 +7029,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
}
+ i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
/* vlan gets same features (except vlan offload)
* after any tweaks for specific VSI types
*/
@@ -5670,12 +7063,7 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
return;
- /* there is no HW VSI for FDIR */
- if (vsi->type == I40E_VSI_FDIR)
- return;
-
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
- return;
}
/**
@@ -5757,12 +7145,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
case I40E_VSI_FDIR:
- /* no queue mapping or actual HW VSI needed */
- vsi->info.valid_sections = 0;
- vsi->seid = 0;
- vsi->id = 0;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
- return 0;
break;
case I40E_VSI_VMDQ2:
@@ -5801,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.info.sec_flags |=
+ (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -5883,15 +7278,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
if (vsi->netdev) {
/* results in a call to i40e_close() */
unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
}
} else {
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
}
i40e_vsi_disable_irq(vsi);
}
@@ -5903,6 +7292,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_delete(vsi);
i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev) {
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
i40e_vsi_clear_rings(vsi);
i40e_vsi_clear(vsi);
@@ -5914,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
* the orphan VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] &&
pf->vsi[i]->uplink_seid == uplink_seid &&
(pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -5950,20 +7343,19 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
int ret = -ENOENT;
struct i40e_pf *pf = vsi->back;
- if (vsi->q_vectors) {
+ if (vsi->q_vectors[0]) {
dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
vsi->seid);
return -EEXIST;
}
if (vsi->base_vector) {
- dev_info(&pf->pdev->dev,
- "VSI %d has non-zero base vector %d\n",
+ dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
vsi->seid, vsi->base_vector);
return -EEXIST;
}
- ret = i40e_alloc_q_vectors(vsi);
+ ret = i40e_vsi_alloc_q_vectors(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"failed to allocate %d q_vector for VSI %d, ret=%d\n",
@@ -5972,11 +7364,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out;
}
- vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
- vsi->num_q_vectors, vsi->idx);
+ if (vsi->num_q_vectors)
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev,
- "failed to get q tracking for VSI %d, err=%d\n",
+ "failed to get queue tracking for VSI %d, err=%d\n",
vsi->seid, vsi->base_vector);
i40e_vsi_free_q_vectors(vsi);
ret = -ENOENT;
@@ -5988,6 +7381,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise returns NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6030,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (!veb && uplink_seid != pf->mac_seid) {
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
vsi = pf->vsi[i];
break;
@@ -6067,6 +7523,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (v_idx < 0)
goto err_alloc;
vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_alloc;
vsi->type = type;
vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
@@ -6075,7 +7533,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+ vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
vsi->seid, ret);
@@ -6101,6 +7560,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
goto err_netdev;
vsi->netdev_registered = true;
netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+ /* Setup DCB netlink interface */
+ i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
/* fall through */
case I40E_VSI_FDIR:
@@ -6266,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -6318,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
n++;
vsi = pf->vsi[i];
@@ -6347,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
i40e_veb_clear(veb);
-
- return;
}
/**
@@ -6358,12 +7819,14 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ bool is_default = false;
+ bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default, &veb->seid, NULL);
+ veb->enabled_tc, is_default,
+ is_cloud, &veb->seid, NULL);
if (ret) {
dev_info(&veb->pf->pdev->dev,
"couldn't add VEB, err %d, aq_err %d\n",
@@ -6430,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
break;
- if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
vsi_seid);
return NULL;
@@ -6468,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
if (ret)
goto err_veb;
+ if (vsi_idx == pf->lan_vsi)
+ pf->lan_veb = veb->idx;
return veb;
@@ -6603,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
"header: %d reported %d total\n",
num_reported, num_total);
- if (num_reported) {
- int sz = sizeof(*sw_config) * num_reported;
-
- kfree(pf->sw_config);
- pf->sw_config = kzalloc(sz, GFP_KERNEL);
- if (pf->sw_config)
- memcpy(pf->sw_config, sw_config, sz);
- }
-
for (i = 0; i < num_reported; i++) {
struct i40e_aqc_switch_config_element_resp *ele =
&sw_config->element[i];
@@ -6628,11 +8084,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6645,14 +8103,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_pf_reset_stats(pf);
- /* fdir VSI must happen first to be sure it gets queue 0, but only
- * if there is enough room for the fdir VSI
- */
- if (pf->num_lan_qps > 1)
- i40e_fdir_setup(pf);
-
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6663,19 +8115,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
return -EAGAIN;
}
- /* accommodate kcompat by copying the main VSI queue count
- * into the pf, since this newer code pushes the pf queue
- * info down a level into a VSI
- */
- pf->num_rx_queues = vsi->alloc_queue_pairs;
- pf->num_tx_queues = vsi->alloc_queue_pairs;
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6685,6 +8133,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_sb_setup(pf);
+
/* Setup static PF queue filter control settings */
ret = i40e_setup_pf_filter_control(pf);
if (ret) {
@@ -6703,37 +8153,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
- return ret;
-}
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
-/**
- * i40e_set_rss_size - helper to set rss_size
- * @pf: board private structure
- * @queues_left: how many queues
- */
-static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
-{
- int num_tc0;
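+	/* push the negotiated Tx pause setting to the port, then merge the
+	 * Rx pause setting into MFLCN without disturbing its other bits
+	 */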
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
+
+ goto fc_complete;
- num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
- num_tc0 = rounddown_pow_of_two(num_tc0);
+no_autoneg:
+ /* disable L2 flow control, user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
- return num_tc0;
+fc_complete:
+ i40e_ptp_init(pf);
+
+ return ret;
}
/**
@@ -6742,12 +8223,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
**/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
- int accum_tc_size;
int queues_left;
pf->num_lan_qps = 0;
- pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
- accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -6755,99 +8233,56 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
*/
queues_left = pf->hw.func_caps.num_tx_qp;
- if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
- (queues_left == 1)) {
-
+ if ((queues_left == 1) ||
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save num_tc_qps queues for TCs 1 thru 7 and the rest
- * are set up for RSS in TC0
- */
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
- return;
- }
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- queues_left -= 1; /* save 1 queue for FD */
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE))) {
+ /* one qp for PF */
+ pf->rss_size = pf->num_lan_qps = 1;
+ queues_left -= pf->num_lan_qps;
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
- return;
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ } else {
+ /* Not enough queues for all TCs */
+ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
+ (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
+ pf->num_lan_qps = pf->rss_size_max;
+ queues_left -= pf->num_lan_qps;
+ }
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save 1 queue for TCs 1 thru 7,
- * 1 queue for flow director,
- * and the rest are set up for RSS in TC0
- */
- queues_left -= 1;
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
- return;
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (queues_left > 1) {
+ queues_left -= 1; /* save 1 queue for FD */
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else {
- dev_info(&pf->pdev->dev,
- "Invalid configuration, flags=0x%08llx\n", pf->flags);
- return;
}
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
- pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
- pf->num_vf_qps));
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+ (queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
@@ -6858,7 +8293,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
- return;
+ pf->queues_left = queues_left;
}
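
The reworked accounting above spends the queue budget in a fixed order: LAN queues first, then at most one queue reserved for Flow Director sideband, then VF queue pairs, then VMDq queue pairs, with the remainder stored in pf->queues_left. A stand-alone sketch of that arithmetic, using simplified stand-ins for the pf-> fields and flags rather than the driver's own structures:

    #include <stdio.h>

    /* Simplified model of the budgeting order above; the field names are
     * illustrative stand-ins, not the driver's real pf-> members.
     */
    struct budget {
            int total_qp, lan_qps, fd_sb_enabled;
            int num_vfs, vf_qps, vmdq_vsis, vmdq_qps;
            int queues_left;
    };

    static void determine_queue_usage(struct budget *b)
    {
            int left = b->total_qp;

            left -= b->lan_qps;                         /* main LAN VSI */
            if (b->fd_sb_enabled && left > 1)
                    left -= 1;                          /* Flow Director sideband */
            if (b->num_vfs && b->vf_qps)
                    left -= b->num_vfs * b->vf_qps;     /* SR-IOV VFs */
            if (b->vmdq_vsis && b->vmdq_qps)
                    left -= b->vmdq_vsis * b->vmdq_qps; /* VMDq VSIs */
            b->queues_left = left;
    }

    int main(void)
    {
            struct budget b = { .total_qp = 128, .lan_qps = 64,
                                .fd_sb_enabled = 1, .num_vfs = 4, .vf_qps = 4,
                                .vmdq_vsis = 2, .vmdq_qps = 8 };

            determine_queue_usage(&b);
            printf("queues left: %d\n", b.queues_left); /* 128-64-1-16-16 = 31 */
            return 0;
    }
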
/**
@@ -6879,7 +8314,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -6892,6 +8327,45 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
return 0;
}
+#define INFO_STRING_LEN 255
+static void i40e_print_features(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ char *buf, *string;
+
+ string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+ if (!string) {
+ dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+ return;
+ }
+
+ buf = string;
+
+ buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
+#ifdef CONFIG_PCI_IOV
+ buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+#endif
+ buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
+
+ if (pf->flags & I40E_FLAG_RSS_ENABLED)
+ buf += sprintf(buf, "RSS ");
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+ buf += sprintf(buf, "FD_ATR ");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ buf += sprintf(buf, "FD_SB ");
+ buf += sprintf(buf, "NTUPLE ");
+ }
+ if (pf->flags & I40E_FLAG_DCB_CAPABLE)
+ buf += sprintf(buf, "DCB ");
+ if (pf->flags & I40E_FLAG_PTP)
+ buf += sprintf(buf, "PTP ");
+
+ BUG_ON(buf > (string + INFO_STRING_LEN));
+ dev_info(&pf->pdev->dev, "%s\n", string);
+ kfree(string);
+}
+
/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -6905,28 +8379,27 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 link_status;
int err = 0;
u32 len;
+ u32 i;
err = pci_enable_device_mem(pdev);
if (err)
return err;
/* set up for high or low dma */
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /* coherent mask for the same size will always succeed if
- * dma_set_mask does
- */
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- } else {
- dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
- err = -EIO;
- goto err_dma;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
}
/* set up pci connections */
@@ -6973,6 +8446,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ pf->instance = pfs_found;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
/* Reset here to make sure all is clean and to define PF 'n' */
err = i40e_pf_reset(hw);
@@ -6997,6 +8482,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
if (err) {
@@ -7007,6 +8495,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_verify_eeprom(pf);
+
+ /* Rev 0 hardware was never productized */
+ if (hw->revision_id < 1)
+ dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
+ i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
goto err_adminq_setup;
@@ -7033,16 +8528,24 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
- if (i40e_validate_mac_addr(hw->mac.addr)) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
goto err_mac_addr;
}
dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
- memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ err = i40e_init_pf_dcb(pf);
+ if (err) {
+ dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
+ }
+#endif /* CONFIG_I40E_DCB */
/* set up periodic task facility */
setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
@@ -7053,23 +8556,44 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
pf->link_check_timeout = jiffies;
+ /* WoL defaults to disabled */
+ pf->wol_en = false;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
- /* Set up the *vsi struct based on the number of VSIs in the HW,
- * and set up our local tracking of the MAIN PF vsi.
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
*/
- len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+ len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
pf->vsi = kzalloc(len, GFP_KERNEL);
- if (!pf->vsi)
+ if (!pf->vsi) {
+ err = -ENOMEM;
goto err_switch_setup;
+ }
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -7091,9 +8615,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+#ifdef CONFIG_PCI_IOV
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
- (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
u32 val;
/* disable link interrupts for VFs */
@@ -7101,29 +8627,64 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
i40e_flush(hw);
+
+ if (pci_num_vf(pdev)) {
+ dev_info(&pdev->dev,
+ "Active VFs found, allocating resources.\n");
+ err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+ if (err)
+ dev_info(&pdev->dev,
+ "Error %d allocating resources for existing VFs\n",
+ err);
+ }
}
+#endif /* CONFIG_PCI_IOV */
+
+ pfs_found++;
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+ i40e_send_version(pf);
/* since everything's happy, start the service_task timer */
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* Get the negotiated link width and speed from PCI config space */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ dev_info(&pdev->dev, "PCI-Express: %s %s\n",
+ (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+ hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
+
+ /* print a string summarizing features */
+ i40e_print_features(pf);
+
return 0;
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
@@ -7166,16 +8727,18 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_dbg_pf_exit(pf);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
+ i40e_ptp_stop(pf);
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
@@ -7203,13 +8766,15 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown and destroy the HMC */
- ret_code = i40e_shutdown_lan_hmc(&pf->hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the HMC resources: %d\n", ret_code);
+ if (pf->hw.hmc.hmc_obj) {
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n",
+ ret_code);
+ }
/* shutdown the adminq */
- i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,
@@ -7218,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
@@ -7233,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
kfree(pf->qp_pile);
kfree(pf->irq_pile);
- kfree(pf->sw_config);
kfree(pf->vsi);
/* force a PF reset to clean anything leftover */
@@ -7266,7 +8830,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7329,9 +8897,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+ rtnl_unlock();
+}
+
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so
+ * call pci_save_state() again to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7343,6 +9005,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97e1bb30ef8..81299189a47 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -28,14 +27,14 @@
#include "i40e_prototype.h"
/**
- * i40e_init_nvm_ops - Initialize NVM function pointers.
- * @hw: pointer to the HW structure.
+ * i40e_init_nvm_ops - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
*
- * Setups the function pointers and the NVM info structure. Should be called
- * once per NVM initialization, e.g. inside the i40e_init_shared_code().
- * Please notice that the NVM term is used here (& in all methods covered
- * in this file) as an equivalent of the FLASH part mapped into the SR.
- * We are accessing FLASH always thru the Shadow RAM.
+ * Set up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ * Please notice that the NVM term is used here (& in all methods covered
+ * in this file) as an equivalent of the FLASH part mapped into the SR.
+ * We are accessing FLASH always thru the Shadow RAM.
**/
i40e_status i40e_init_nvm(struct i40e_hw *hw)
{
@@ -50,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
gens = rd32(hw, I40E_GLNVM_GENS);
sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
I40E_GLNVM_GENS_SR_SIZE_SHIFT);
- /* Switching to words (sr_size contains power of 2KB). */
+ /* Switching to words (sr_size contains power of 2KB) */
nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
- /* Check if we are in the normal or blank NVM programming mode. */
+ /* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, I40E_GLNVM_FLA);
- if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
- /* Max NVM timeout. */
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+ /* Max NVM timeout */
nvm->timeout = I40E_MAX_NVM_TIMEOUT;
nvm->blank_nvm_mode = false;
- } else { /* Blank programming mode. */
+ } else { /* Blank programming mode */
nvm->blank_nvm_mode = true;
ret_code = I40E_ERR_NVM_BLANK_MODE;
hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
@@ -69,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
}
/**
- * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
- * @hw: pointer to the HW structure.
- * @access: NVM access type (read or write).
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
*
- * This function will request NVM ownership for reading
- * via the proper Admin Command.
+ * This function will request NVM ownership for reading
+ * via the proper Admin Command.
**/
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access)
@@ -88,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
0, &time, NULL);
- /* Reading the Global Device Timer. */
+ /* Reading the Global Device Timer */
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
- /* Store the timeout. */
+ /* Store the timeout */
hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
if (ret_code) {
- /* Set the polling timeout. */
+ /* Set the polling timeout */
if (time > I40E_MAX_NVM_TIMEOUT)
timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ gtime;
else
timeout = hw->nvm.hw_semaphore_timeout;
- /* Poll until the current NVM owner timeouts. */
+ /* Poll until the current NVM owner timeouts */
while (gtime < timeout) {
usleep_range(10000, 20000);
ret_code = i40e_aq_request_resource(hw,
@@ -129,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:
}
/**
- * i40e_release_nvm - Generic request for releasing the NVM ownership.
- * @hw: pointer to the HW structure.
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
*
- * This function will release NVM resource via the proper Admin Command.
+ * This function will release NVM resource via the proper Admin Command.
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
@@ -141,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)
}
/**
- * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
- * @hw: pointer to the HW structure.
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
*
- * Polls the SRCTL Shadow RAM register done bit.
+ * Polls the SRCTL Shadow RAM register done bit.
**/
static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
- /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
srctl = rd32(hw, I40E_GLNVM_SRCTL);
if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
@@ -161,20 +160,20 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
udelay(5);
}
if (ret_code == I40E_ERR_TIMEOUT)
- hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+ hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
return ret_code;
}
/**
- * i40e_read_nvm_srctl - Reads Shadow RAM.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
*
- * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -185,15 +184,15 @@ static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
goto read_nvm_exit;
}
- /* Poll the done bit first. */
+ /* Poll the done bit first */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
- /* Write the address and start reading. */
+ /* Write the address and start reading */
sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
(1 << I40E_GLNVM_SRCTL_START_SHIFT);
wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
- /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set */
ret_code = i40e_poll_sr_srctl_done_bit(hw);
if (!ret_code) {
sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
@@ -211,80 +210,45 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM word.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @data: word read from the Shadow RAM.
- *
- * Reads 16 bit word from the Shadow RAM. Each read is preceded
- * with the NVM ownership taking and followed by the release.
- **/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
-{
- i40e_status ret_code = 0;
-
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- ret_code = i40e_read_nvm_srctl(hw, offset, data);
- i40e_release_nvm(hw);
- }
-
- return ret_code;
-}
-
-/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
- * @hw: pointer to the HW structure.
- * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- * @words: number of words to read (in) &
- * number of words read before the NVM ownership timeout (out).
- * @data: words read from the Shadow RAM.
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
*
- * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- * method. The buffer read is preceded by the NVM ownership take
- * and followed by the release.
+ * Reads 16 bit words (data buffer) from the Shadow RAM using the
+ * i40e_read_nvm_word() method. The caller is expected to hold NVM
+ * ownership for the duration of the buffer read.
**/
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
i40e_status ret_code = 0;
u16 index, word;
- u32 time;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
- /* Loop thru the selected region. */
- for (word = 0; word < *words; word++) {
- index = offset + word;
- ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
- if (ret_code)
- break;
- /* Check if we didn't exceeded the semaphore timeout. */
- time = rd32(hw, I40E_GLVFGEN_TIMER);
- if (time >= hw->nvm.hw_semaphore_timeout) {
- ret_code = I40E_ERR_TIMEOUT;
- hw_dbg(hw, "NVM read error: timeout.\n");
- break;
- }
- }
- /* Update the number of words read from the Shadow RAM. */
- *words = word;
- /* Release the NVM ownership. */
- i40e_release_nvm(hw);
+ /* Loop thru the selected region */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ if (ret_code)
+ break;
}
+ /* Update the number of words read from the Shadow RAM */
+ *words = word;
+
return ret_code;
}
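
With the acquire/release pair removed from the loop, NVM ownership is now the caller's responsibility (i40e_validate_nvm_checksum() below takes it around its reads). A hedged sketch of that calling convention, built only from prototypes that appear in this patch; the wrapper name itself is illustrative and not part of the driver:

    #include "i40e_prototype.h"

    /* Illustrative wrapper (not part of the driver): take the NVM semaphore,
     * read a buffer with i40e_read_nvm_buffer(), then release ownership.
     */
    static i40e_status read_sr_buffer_locked(struct i40e_hw *hw, u16 offset,
                                             u16 *words, u16 *data)
    {
            i40e_status ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);

            if (ret)
                    return ret;
            ret = i40e_read_nvm_buffer(hw, offset, words, data);
            i40e_release_nvm(hw);
            return ret;
    }
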
/**
- * i40e_calc_nvm_checksum - Calculates and returns the checksum
- * @hw: pointer to hardware structure
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
*
- * This function calculate SW Checksum that covers the whole 64kB shadow RAM
- * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
- * is customer specific and unknown. Therefore, this function skips all maximum
- * possible size of VPD (1kB).
+ * This function calculates the SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips the maximum
+ * possible size of VPD (1kB).
**/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
@@ -297,14 +261,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u32 i = 0;
/* read pointer to VPD area */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
- ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@@ -331,7 +295,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
break;
}
- ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -346,19 +310,19 @@ i40e_calc_nvm_checksum_exit:
}
/**
- * i40e_validate_nvm_checksum - Validate EEPROM checksum
- * @hw: pointer to hardware structure
- * @checksum: calculated checksum
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
*
- * Performs checksum calculation and validates the NVM SW checksum. If the
- * caller does not need checksum, the value can be NULL.
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need checksum, the value can be NULL.
**/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
i40e_status ret_code = 0;
u16 checksum_sr = 0;
- u16 checksum_local;
+ u16 checksum_local = 0;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
@@ -371,7 +335,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
/* Do not use i40e_read_nvm_word() because we do not want to take
* the synchronization semaphores twice here.
*/
- i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+ i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* Verify read checksum from EEPROM is the same as
* calculated checksum
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 702c81ba86e..ecd0f0b663c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index f75bb9ccc90..a430699c41d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-bool i40e_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw,
@@ -60,10 +58,11 @@ void i40e_debug_aq(struct i40e_hw *hw,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
-void i40e_led_set(struct i40e_hw *hw, u32 mode);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
/* admin send queue commands */
@@ -71,12 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading);
-i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -95,9 +94,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -106,7 +105,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
+ bool default_port, bool enable_l2_filtering,
+ u16 *pveb_seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
@@ -119,12 +119,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_add_remove_vlan_element_data *v_list,
- u8 count, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -164,11 +158,22 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile profile,
u8 pe_vf_enabled_count,
@@ -179,6 +184,15 @@ i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
@@ -207,8 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -222,6 +235,14 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40e_ptype_lookup[ptype];
+}
/* prototype for functions used for SW locks */
@@ -236,4 +257,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_set_filter_control(struct i40e_hw *hw,
struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
new file mode 100644
index 00000000000..101f439acda
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -0,0 +1,625 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+#include <linux/export.h>
+#include <linux/ptp_classify.h>
+
+/* The XL710 timesync is very much like Intel's 82599 design when it comes to
+ * the fundamental clock design. However, the clock operations are much simpler
+ * in the XL710 because the device supports a full 64 bits of nanoseconds.
+ * Because the field is so wide, we can forgo the cycle counter and just
+ * operate with the nanosecond field directly without fear of overflow.
+ *
+ * Much like the 82599, the update period is dependent upon the link speed:
+ * At 40Gb link or no link, the period is 1.6ns.
+ * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 1Gb link, the period is multiplied by 20. (32ns)
+ * 1588 functionality is not supported at 100Mbps.
+ */
+#define I40E_PTP_40GB_INCVAL 0x0199999999ULL
+#define I40E_PTP_10GB_INCVAL 0x0333333333ULL
+#define I40E_PTP_1GB_INCVAL 0x2000000000ULL
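
The increment constants above read naturally as 32.32 fixed-point nanoseconds per tick (0x0199999999 / 2^32 ≈ 1.6 ns, 0x0333333333 / 2^32 ≈ 3.2 ns, 0x2000000000 / 2^32 = 32 ns), which matches the periods listed in the comment. A quick stand-alone check of that reading; the fixed-point format is an inference from the values, not quoted from a datasheet:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Interpret each INCVAL as nanoseconds with 32 fractional bits. */
            const uint64_t incval[] = { 0x0199999999ULL, 0x0333333333ULL,
                                        0x2000000000ULL };
            const char *link[] = { "40G", "10G", "1G" };

            for (int i = 0; i < 3; i++)
                    printf("%s link: %.4f ns per tick\n", link[i],
                           incval[i] / 4294967296.0); /* divide by 2^32 */
            return 0;
    }
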
+
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 (0x1 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
+ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+
+/**
+ * i40e_ptp_read - Read the PHC time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * This function reads the PRTTSYN_TIME registers and stores them in a
+ * timespec. However, since the registers are 64 bits of nanoseconds, we must
+ * convert the result to a timespec before we can return.
+ **/
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ /* The timer latches on the lowest register read. */
+ lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ *ts = ns_to_timespec(ns);
+}
+
+/**
+ * i40e_ptp_write - Write the PHC time to the device
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * This function writes the PRTTSYN_TIME registers with the user value. Since
+ * we receive a timespec from the stack, we must convert that timespec into
+ * nanoseconds before programming the registers.
+ **/
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 ns = timespec_to_ns(ts);
+
+ /* The timer will not update until the high register is written, so
+ * write the low register first.
+ */
+ wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32);
+}
+
+/**
+ * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time
+ * @hwtstamps: Timestamp structure to update
+ * @timestamp: Timestamp from the hardware
+ *
+ * We need to convert the NIC clock value into a hwtstamp which can be used by
+ * the upper level timestamping functions. Since the timestamp is simply a 64-
+ * bit nanosecond value, we can call ns_to_ktime directly to handle this.
+ **/
+static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
+ u64 timestamp)
+{
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
+ hwtstamps->hwtstamp = ns_to_ktime(timestamp);
+}
+
+/**
+ * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * @ptp: The PTP clock structure
+ * @ppb: Parts per billion adjustment from the base
+ *
+ * Adjust the frequency of the PHC by the indicated parts per billion from the
+ * base frequency.
+ **/
+static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct i40e_hw *hw = &pf->hw;
+ u64 adj, freq, diff;
+ int neg_adj = 0;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ smp_mb(); /* Force any pending update before accessing. */
+ adj = ACCESS_ONCE(pf->ptp_base_adj);
+
+ freq = adj;
+ freq *= ppb;
+ diff = div_u64(freq, 1000000000ULL);
+
+ if (neg_adj)
+ adj -= diff;
+ else
+ adj += diff;
+
+ wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
+
+ return 0;
+}
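
The scaling above amounts to new_incval = base_incval * (1 ± ppb / 1e9), done entirely in 64-bit integer math, which stays within u64 for any adjustment the PHC core can request (max_adj is 999999999 below). A tiny stand-alone illustration of the same arithmetic, reusing the 40G base increment as the cached value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t base = 0x0199999999ULL;    /* 40G base increment */
            int32_t ppb = 10000;                /* request: +10 ppm */
            uint64_t mag = ppb < 0 ? -ppb : ppb;
            uint64_t diff = base * mag / 1000000000ULL;
            uint64_t adj = ppb < 0 ? base - diff : base + diff;

            printf("base=0x%llx adj=0x%llx diff=%llu\n",
                   (unsigned long long)base, (unsigned long long)adj,
                   (unsigned long long)diff);
            return 0;
    }
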
+
+/**
+ * i40e_ptp_adjtime - Adjust the PHC time
+ * @ptp: The PTP clock structure
+ * @delta: Offset in nanoseconds to adjust the PHC time by
+ *
+ * Adjust the current PHC time by the requested delta, which is specified in
+ * nanoseconds. The clock frequency is left unchanged.
+ **/
+static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ struct timespec now, then = ns_to_timespec(delta);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+
+ i40e_ptp_read(pf, &now);
+ now = timespec_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec *)&now);
+
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_gettime - Get the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the device clock and return the correct value in ns, after converting it
+ * into a timespec struct.
+ **/
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_read(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_settime - Set the time of the PHC
+ * @ptp: The PTP clock structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ **/
+static int i40e_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pf->tmreg_lock, flags);
+ i40e_ptp_write(pf, ts);
+ spin_unlock_irqrestore(&pf->tmreg_lock, flags);
+
+ return 0;
+}
+
+/**
+ * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * @ptp: The PTP clock structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * The XL710 does not support any of the ancillary features of the PHC
+ * subsystem, so this function just returns -EOPNOTSUPP.
+ **/
+static int i40e_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
+ * @vsi: The VSI with the rings relevant to 1588
+ *
+ * This watchdog task is scheduled to detect the error case where hardware has
+ * dropped an Rx packet that was timestamped when the ring is full. The
+ * particular error is rare but leaves the device in a state unable to
+ * timestamp any future packets.
+ **/
+void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_ring *rx_ring;
+ unsigned long rx_event;
+ u32 prttsyn_stat;
+ int n;
+
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ /* Unless all four receive timestamp registers are latched, we are not
+ * concerned about a possible PTP Rx hang, so just update the timeout
+ * counter and exit.
+ */
+ if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT1_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT2_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
+ (I40E_PRTTSYN_STAT_1_RXT3_MASK <<
+ I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
+ pf->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event. */
+ rx_event = pf->last_rx_ptp_check;
+ for (n = 0; n < vsi->num_queue_pairs; n++) {
+ rx_ring = vsi->rx_rings[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+ pf->last_rx_ptp_check = jiffies;
+ pf->rx_hwtstamp_cleared++;
+ dev_warn(&vsi->back->pdev->dev,
+ "%s: clearing Rx timestamp hang\n",
+ __func__);
+ }
+}
+
+/**
+ * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp
+ * @pf: Board private structure
+ *
+ * Read the value of the Tx timestamp from the registers, convert it into a
+ * value consumable by the stack, and store that result into the shhwtstamps
+ * struct before returning it up the stack.
+ **/
+void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct i40e_hw *hw = &pf->hw;
+ u32 hi, lo;
+ u64 ns;
+
+ lo = rd32(hw, I40E_PRTTSYN_TXTIME_L);
+ hi = rd32(hw, I40E_PRTTSYN_TXTIME_H);
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns);
+ skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+}
+
+/**
+ * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp
+ * @pf: Board private structure
+ * @skb: Particular skb to send timestamp with
+ * @index: Index into the receive timestamp registers for the timestamp
+ *
+ * The XL710 receives a notification in the receive descriptor with an offset
+ * into the set of RXTIME registers where the timestamp is for that skb. This
+ * function goes and fetches the receive timestamp from that offset, if a valid
+ * one exists. The RXTIME registers are in ns, so we must convert the result
+ * first.
+ **/
+void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
+{
+ u32 prttsyn_stat, hi, lo;
+ struct i40e_hw *hw;
+ u64 ns;
+
+ /* Since we cannot turn off the Rx timestamp logic if the device is
+ * doing Tx timestamping, check if Rx timestamping is configured.
+ */
+ if (!pf->ptp_rx)
+ return;
+
+ hw = &pf->hw;
+
+ prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
+
+ if (!(prttsyn_stat & (1 << index)))
+ return;
+
+ lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
+ hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
+
+ ns = (((u64)hi) << 32) | lo;
+
+ i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
+}
+
+/**
+ * i40e_ptp_set_increment - Utility function to update clock increment rate
+ * @pf: Board private structure
+ *
+ * During a link change, the DMA frequency that drives the 1588 logic will
+ * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds,
+ * we must update the increment value per clock tick.
+ **/
+void i40e_ptp_set_increment(struct i40e_pf *pf)
+{
+ struct i40e_link_status *hw_link_info;
+ struct i40e_hw *hw = &pf->hw;
+ u64 incval;
+
+ hw_link_info = &hw->phy.link_info;
+
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+
+ switch (hw_link_info->link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ incval = I40E_PTP_10GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ incval = I40E_PTP_1GB_INCVAL;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ dev_warn(&pf->pdev->dev,
+ "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n",
+ __func__);
+ incval = 0;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ default:
+ incval = I40E_PTP_40GB_INCVAL;
+ break;
+ }
+
+ /* Write the new increment value into the increment register. The
+ * hardware will not update the clock until both registers have been
+ * written.
+ */
+ wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF);
+ wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
+
+ /* Update the base adjustment value. */
+ ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ smp_mb(); /* Force the above update. */
+}
+
+/**
+ * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Obtain the current hardware timestamping settings as requested. To do this,
+ * keep a shadow copy of the timestamp settings rather than attempting to
+ * deconstruct them from the registers.
+ **/
+int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &pf->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct hwtstamp_config *config = &pf->tstamp_config;
+ u32 pf_id, tsyntype, regval;
+
+ if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
+ return -EFAULT;
+
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ /* Confirm that 1588 is supported on this PF. */
+ pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
+ I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
+ if (hw->pf_id != pf_id)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ pf->ptp_tx = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ pf->ptp_tx = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ pf->ptp_rx = false;
+ tsyntype = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V1 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ pf->ptp_rx = true;
+ tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK |
+ I40E_PRTTSYN_CTL1_TSYNTYPE_V2 |
+ I40E_PRTTSYN_CTL1_UDP_ENA_MASK;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ /* Clear out all 1588-related registers to clear and unlatch them. */
+ rd32(hw, I40E_PRTTSYN_STAT_0);
+ rd32(hw, I40E_PRTTSYN_TXTIME_H);
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
+ rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
+
+ /* Enable/disable the Tx timestamp interrupt based on user input. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ if (pf->ptp_tx)
+ regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ else
+ regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+
+ regval = rd32(hw, I40E_PFINT_ICR0_ENA);
+ if (pf->ptp_tx)
+ regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ else
+ regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, regval);
+
+ /* There is no simple on/off switch for Rx. To "disable" Rx support,
+ * ignore any received timestamps, rather than turn off the clock.
+ */
+ if (pf->ptp_rx) {
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ /* clear everything but the enable bit */
+ regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ /* now enable bits for desired Rx timestamps */
+ regval |= tsyntype;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+ }
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
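
Both handlers sit behind the generic hardware timestamping ioctls, so the structure being copied in and out here is the standard struct hwtstamp_config from linux/net_tstamp.h. A minimal user-space sketch of a request that would land in the path above; the interface name "eth0" is only a placeholder:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
            /* Request Tx timestamps and PTPv2 event Rx timestamps. */
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* placeholder name */
            ifr.ifr_data = (char *)&cfg;
            if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
                    perror("SIOCSHWTSTAMP");
            else
                    printf("granted rx_filter: %d\n", cfg.rx_filter);
            close(fd);
            return 0;
    }

Note that the driver may broaden the requested rx_filter (as the switch above does) and writes the granted value back, which is why the example prints cfg.rx_filter after the ioctl returns.
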
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * @pf: Board private structure
+ *
+ * This function registers the device clock as a PHC. If it is successful, it
+ * starts the clock in the hardware.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+
+ strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+ pf->ptp_caps.owner = THIS_MODULE;
+ pf->ptp_caps.max_adj = 999999999;
+ pf->ptp_caps.n_ext_ts = 0;
+ pf->ptp_caps.pps = 0;
+ pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjtime = i40e_ptp_adjtime;
+ pf->ptp_caps.gettime = i40e_ptp_gettime;
+ pf->ptp_caps.settime = i40e_ptp_settime;
+ pf->ptp_caps.enable = i40e_ptp_enable;
+
+ /* Attempt to register the clock before enabling the hardware. */
+ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
+ if (IS_ERR(pf->ptp_clock)) {
+ pf->ptp_clock = NULL;
+ dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
+ __func__);
+ } else {
+ struct timespec ts;
+ u32 regval;
+
+ spin_lock_init(&pf->tmreg_lock);
+
+ dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
+ netdev->name);
+ pf->flags |= I40E_FLAG_PTP;
+
+ /* Ensure the clocks are running. */
+ regval = rd32(hw, I40E_PRTTSYN_CTL0);
+ regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL0, regval);
+ regval = rd32(hw, I40E_PRTTSYN_CTL1);
+ regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK;
+ wr32(hw, I40E_PRTTSYN_CTL1, regval);
+
+ /* Set the increment value per clock tick. */
+ i40e_ptp_set_increment(pf);
+
+ /* reset the tstamp_config */
+ memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+
+ /* Set the clock value. */
+ ts = ktime_to_timespec(ktime_get_real());
+ i40e_ptp_settime(&pf->ptp_caps, &ts);
+ }
+}
+
+/**
+ * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required after initialization by
+ * clearing the driver's PTP state and unregistering the PHC.
+ **/
+void i40e_ptp_stop(struct i40e_pf *pf)
+{
+ pf->flags &= ~I40E_FLAG_PTP;
+ pf->ptp_tx = false;
+ pf->ptp_rx = false;
+
+ if (pf->ptp_tx_skb) {
+ dev_kfree_skb_any(pf->ptp_tx_skb);
+ pf->ptp_tx_skb = NULL;
+ }
+
+ if (pf->ptp_clock) {
+ ptp_clock_unregister(pf->ptp_clock);
+ pf->ptp_clock = NULL;
+ dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 6bd333cde28..947de98500f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -28,6 +27,10 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
@@ -38,6 +41,11 @@
#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
@@ -50,9 +58,14 @@
#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
#define I40E_PF_ARQBAH 0x00080180
#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
@@ -837,7 +850,7 @@
#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
#define I40E_GLHMC_PEQ1MAX 0x000C2054
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
@@ -903,7 +916,7 @@
#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
#define I40E_GLHMC_PEXFFLMAX 0x000C204c
#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
#define I40E_GLHMC_PEXFMAX 0x000C2048
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
@@ -1327,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1354,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1576,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
@@ -1636,7 +1653,7 @@
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
@@ -1773,16 +1790,20 @@
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
@@ -2035,6 +2056,28 @@
#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
#define I40E_GLPCI_BYTCTH 0x0009C484
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
@@ -2170,6 +2213,12 @@
#define I40E_GLPCI_PCIERR 0x000BE4FC
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
#define I40E_GLPCI_PKTCT 0x0009C4BC
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
@@ -2380,8 +2429,7 @@
#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+
#define I40E_PFPE_MRTEIDXMASK 0x00008600
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
@@ -2460,8 +2508,6 @@
#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
@@ -3141,30 +3187,6 @@
#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPM_DMACR 0x000881F4
-#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
-#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
-#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
-#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
-#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
-#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
-#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
-#define I40E_GLPM_LTRC 0x000BE500
-#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
-#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
-#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
-#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
-#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
-#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
-#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
-#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
-#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
-#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
-#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
-#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
#define I40E_PRTPM_EEE_STAT 0x001E4320
#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
@@ -3201,9 +3223,6 @@
#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_HPTC 0x000AC800
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
-#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0
#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
@@ -3265,8 +3284,8 @@
#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
-#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
@@ -3416,9 +3435,9 @@
#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
@@ -3504,7 +3523,7 @@
#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
-#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
@@ -3521,10 +3540,7 @@
#define I40E_GL_FCOEDDPC_MAX_INDEX 143
#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
-#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+/* _i=0...143 */
#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
@@ -4276,46 +4292,10 @@
#define I40E_PFPM_APM 0x000B8080
#define I40E_PFPM_APM_APME_SHIFT 0
#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
-#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
-#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
-#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
-#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
-#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
-#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
-#define I40E_PFPM_PROXYFC 0x00245A80
-#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
-#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
-#define I40E_PFPM_PROXYFC_EX_SHIFT 1
-#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
-#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_SHIFT 9
-#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
-#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
-#define I40E_PFPM_PROXYS 0x00245B80
-#define I40E_PFPM_PROXYS_EX_SHIFT 1
-#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_SHIFT 4
-#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
-#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_NS_SHIFT 9
-#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
-#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
-#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
-#define I40E_PFPM_PROXYS_MLD_SHIFT 12
-#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
#define I40E_PFPM_WUC 0x0006B200
#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
@@ -4536,21 +4516,21 @@
#define I40E_VFMSIX_PBA 0x00002000
#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TADD_MAX_INDEX 16
#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TMSG_MAX_INDEX 16
#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TUADD_MAX_INDEX 16
#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
@@ -4610,8 +4590,6 @@
#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
-#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
#define I40E_VFPE_MRTEIDXMASK1 0x00009000
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
@@ -4684,5 +4662,13 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 5e5bcddac57..5f9cac55aa5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa9b0c..e49f31dbd5d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -25,7 +24,9 @@
*
******************************************************************************/
+#include <linux/prefetch.h>
#include "i40e.h"
+#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
@@ -37,19 +38,22 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
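+/* Descriptor command bits set on the last descriptor of every transmit:
+ * EOP marks the end of the packet and RS requests a completion write-back.
+ */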
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_program_fdir_filter - Program a Flow Director filter
- * @fdir_input: Packet data that will be filter parameters
+ * @fdir_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
* @pf: The pf pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
struct i40e_pf *pf, bool add)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
+ unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -58,98 +62,90 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* find existing FDIR VSI */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi)
return -ENOENT;
- tx_ring = &vsi->tx_rings[0];
+ tx_ring = vsi->tx_rings[0];
dev = tx_ring->dev;
- dma = dma_map_single(dev, fdir_data->raw_packet,
- I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ dma = dma_map_single(dev, raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto dma_fail;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
- << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
- & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+ fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
- << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
- & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+ fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+ I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
- fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
- << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
- & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+ fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+ I40E_TXD_FLTR_QW0_PCTYPE_MASK;
/* Use LAN VSI Id if not programmed by user */
if (fdir_data->dest_vsi == 0)
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
else
- fdir_desc->qindex_flex_ptype_vsi |=
- cpu_to_le32((fdir_data->dest_vsi
- << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
- & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+ fpt |= ((u32)fdir_data->dest_vsi <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+ I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
- fdir_desc->dtype_cmd_cntindex =
- cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+ dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add)
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
else
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
- << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
- << I40E_TXD_FLTR_QW1_DEST_SHIFT)
- & I40E_TXD_FLTR_QW1_DEST_MASK);
+ dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+ I40E_TXD_FLTR_QW1_DEST_MASK;
- fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
- (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
- & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+ dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+ I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
if (fdir_data->cnt_index != 0) {
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
- fdir_desc->dtype_cmd_cntindex |=
- cpu_to_le32((fdir_data->cnt_index
- << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
- & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dcc |= ((u32)fdir_data->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
/* Now program a dummy descriptor */
- tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
- tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_buf = &tx_ring->tx_bi[i];
+
+ tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
+ dma_unmap_addr_set(tx_buf, dma, dma);
tx_desc->buffer_addr = cpu_to_le64(dma);
- td_cmd = I40E_TX_DESC_CMD_EOP |
- I40E_TX_DESC_CMD_RS |
- I40E_TX_DESC_CMD_DUMMY;
+ td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
- /* Mark the data descriptor to be watched */
- tx_buf->next_to_watch = tx_desc;
+ /* set the timestamp */
+ tx_buf->time_stamp = jiffies;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -158,6 +154,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
*/
wmb();
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
writel(tx_ring->next_to_use, tx_ring->tail);
return 0;
@@ -165,50 +164,349 @@ dma_fail:
return -1;
}
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct udphdr *udp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
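+ /* Dummy UDP/IPv4 frame: zeroed Ethernet header with ethertype 0x0800,
+ * a minimal 20-byte IPv4 header (total length 28, protocol 0x11 = UDP)
+ * and an empty UDP header; addresses and ports are filled in below.
+ */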
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fd_data->dst_ip[0];
+ udp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip[0];
+ udp->source = fd_data->src_port;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ /* Dummy packet */
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+ 0x0, 0x72, 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
+
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+ tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fd_data->dst_ip[0];
+ tcp->dest = fd_data->dst_port;
+ ip->saddr = fd_data->src_ip[0];
+ tcp->source = fd_data->src_port;
+
+ if (add) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ }
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Always returns -EOPNOTSUPP
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ return -EOPNOTSUPP;
+}
+
+#define I40E_IP_DUMMY_PACKET_LEN 34
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ u8 *raw_packet, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+ static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+ 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+ ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+ ip->saddr = fd_data->src_ip[0];
+ ip->daddr = fd_data->dst_ip[0];
+ ip->protocol = 0;
+
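+ /* Program the same dummy frame once for each plain IPv4 PCTYPE, from
+ * "non-fragmented other" through "fragmented IPv4", so the rule matches
+ * regardless of fragmentation.
+ */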
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir - Build raw packets to add/del fdir filter
+ * @vsi: pointer to the targeted VSI
+ * @input: the flow director filter to add or delete
+ * @add: true adds a filter, false removes it
+ *
+ **/
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *input, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ /* Allocate the raw packet buffer that the per-flow helpers below will
+ * populate and program into the Flow Director
+ */
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ switch (input->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
+ add);
+ break;
+ case IPV4_FLOW:
+ ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
+ add);
+ break;
+ case IP_USER_FLOW:
+ switch (input->ip4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcpv4(vsi, input,
+ raw_packet, add);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udpv4(vsi, input,
+ raw_packet, add);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, input,
+ raw_packet, add);
+ break;
+ default:
+ ret = i40e_add_del_fdir_ipv4(vsi, input,
+ raw_packet, add);
+ break;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
+ input->flow_type);
+ ret = -EINVAL;
+ }
+
+ kfree(raw_packet);
+ return ret;
+}
+
/**
* i40e_fd_handle_status - check the Programming Status for FD
* @rx_ring: the Rx ring for this descriptor
- * @qw: the descriptor data
+ * @rx_desc: the Rx descriptor carrying the programming status, not a packet descriptor.
* @prog_id: the id originally used for programming
*
* This is used to verify if the FD programming or invalidation
* requested by SW to the HW is successful or not and take actions accordingly.
**/
-static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, u8 prog_id)
{
- struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+ struct i40e_pf *pf = rx_ring->vsi->back;
+ struct pci_dev *pdev = pf->pdev;
+ u32 fcnt_prog, fcnt_avail;
u32 error;
+ u64 qw;
+ qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
- /* for now just print the Status */
- dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
- prog_id, error);
+ if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+ dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+
+ /* filter programming failed most likely due to table full */
+ fcnt_prog = i40e_get_current_fd_count(pf);
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
+ /* If ATR is running, fcnt_prog can change quickly;
+ * if we are very close to full, it makes sense to disable
+ * FD ATR/SB and then re-enable it when there is room.
+ */
+ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
+ /* Turn off ATR first */
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+ pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
+ pf->auto_disable_flags |=
+ I40E_FLAG_FD_ATR_ENABLED;
+ pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ pf->auto_disable_flags |=
+ I40E_FLAG_FD_SB_ENABLED;
+ pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+ }
+ } else {
+ dev_info(&pdev->dev, "FD filter programming error\n");
+ }
+ } else if (error ==
+ (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
+ rx_desc->wb.qword0.hi_dword.fd_id);
+ }
}
/**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
* @ring: the ring that owns the buffer
* @tx_buffer: the buffer to free
**/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
- struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
{
- if (tx_buffer->dma) {
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
- dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
- DMA_TO_DEVICE);
- else
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- tx_buffer->dma = 0;
- tx_buffer->time_stamp = 0;
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
/**
@@ -217,7 +515,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
**/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
- struct i40e_tx_buffer *tx_buffer;
unsigned long bi_size;
u16 i;
@@ -226,13 +523,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* Free all the Tx ring sk_buffs */
- for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_buffer);
- if (tx_buffer->skb)
- dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
- }
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +534,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
}
/**
@@ -300,14 +599,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
@@ -315,6 +614,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
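+ /* The head value is written back into the u32 located immediately
+ * after the last descriptor; i40e_setup_tx_descriptors() reserves
+ * that extra space when sizing the ring.
+ */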
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
* @budget: how many cleans we're allowed
@@ -325,68 +638,96 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
unsigned int total_packets = 0;
unsigned int total_bytes = 0;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
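+ /* i is now biased by -count so it reaches zero exactly when the clean
+ * index wraps; the !i tests below rewind the buffer and descriptor
+ * pointers, and the bias is removed before writing next_to_clean.
+ */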
- for (; budget; budget--) {
- struct i40e_tx_desc *eop_desc;
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
- eop_desc = tx_buf->next_to_watch;
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
- /* if the descriptor isn't done, no work yet to do */
- if (!(eop_desc->cmd_type_offset_bsz &
- cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
break;
- /* count the packet as being completed */
- tx_ring->tx_stats.completed++;
+ /* clear next_to_watch to prevent false hangs */
tx_buf->next_to_watch = NULL;
- tx_buf->time_stamp = 0;
- /* set memory barrier before eop_desc is verified */
- rmb();
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
- do {
- i40e_unmap_tx_resource(tx_ring, tx_buf);
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
- /* clear dtype status */
- tx_desc->cmd_type_offset_bsz &=
- ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
- if (likely(tx_desc == eop_desc)) {
- eop_desc = NULL;
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
- dev_kfree_skb_any(tx_buf->skb);
- tx_buf->skb = NULL;
-
- total_bytes += tx_buf->bytecount;
- total_packets += tx_buf->gso_segs;
- }
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
tx_buf++;
tx_desc++;
i++;
- if (unlikely(i == tx_ring->count)) {
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
tx_buf = tx_ring->tx_bi;
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
- } while (eop_desc);
- }
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
- tx_ring->tx_stats.bytes += total_bytes;
- tx_ring->tx_stats.packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +755,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
return true;
}
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +869,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
i40e_set_new_dynamic_itr(&q_vector->tx);
if (old_itr != q_vector->tx.itr)
wr32(hw, reg_addr, q_vector->tx.itr);
-
- i40e_flush(hw);
}
/**
@@ -549,7 +892,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
- i40e_fd_handle_status(rx_ring, qw, id);
+ i40e_fd_handle_status(rx_ring, rx_desc, id);
}
/**
@@ -573,6 +916,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ /* add u32 for head writeback; the 4K alignment below also
+ * guarantees this is at least one cache line in size
+ */
+ tx_ring->size += sizeof(u32);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
@@ -746,7 +1093,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
@@ -760,7 +1107,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
goto no_buffers;
}
@@ -770,7 +1117,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
}
@@ -785,7 +1132,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_ring->dev,
bi->page_dma)) {
- rx_ring->rx_stats.alloc_rx_page_failed++;
+ rx_ring->rx_stats.alloc_page_failed++;
bi->page_dma = 0;
goto no_buffers;
}
@@ -838,27 +1185,107 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
* @skb: skb currently being received and modified
* @rx_status: status value of last descriptor in packet
* @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
u32 rx_status,
- u32 rx_error)
+ u32 rx_error,
+ u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ struct iphdr *iph;
+ __sum16 csum;
+
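+ /* The GRENAT4/GRENAT6 tunnel ptypes occupy contiguous ranges, so a pair
+ * of range checks on rx_ptype identifies outer-IPv4 vs outer-IPv6 tunnels.
+ */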
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
return;
- /* IP or L4 checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
+
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed; in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ return;
+
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver; the hardware does not do it for us.
+ * Since the L3L4P bit was set we assume a valid IHL value (>=5),
+ * so the total length of the IPv4 header is IHL*4 bytes.
+ * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
+ if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
@@ -869,13 +1296,38 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
static inline u32 i40e_rx_hash(struct i40e_ring *ring,
union i40e_rx_desc *rx_desc)
{
- if (ring->netdev->features & NETIF_F_RXHASH) {
- if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
- I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
- I40E_RX_DESC_FLTSTAT_RSS_HASH)
- return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- }
- return 0;
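+ /* Pre-build the FLTSTAT == RSS-hash pattern in little-endian form so the
+ * descriptor qword can be tested without a per-packet byte swap.
+ */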
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
+}
+
+/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
}
/**
@@ -895,12 +1347,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ u8 rx_ptype;
u64 qword;
+ if (budget <= 0)
+ return 0;
+
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
@@ -916,18 +1372,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
skb = rx_bi->skb;
prefetch(skb->data);
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
- >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
- >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
- >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_bi->skb = NULL;
/* This memory barrier is needed to keep us from reading
@@ -1004,17 +1462,29 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
- skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
total_rx_packets++;
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
: 0;
@@ -1037,13 +1507,15 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
}
rx_ring->next_to_clean = i;
- rx_ring->rx_stats.packets += total_rx_packets;
- rx_ring->rx_stats.bytes += total_rx_bytes;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
@@ -1067,27 +1539,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
bool clean_complete = true;
int budget_per_ring;
- int i;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
return 0;
}
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
- * Since the actual Tx work is minimal, we can give the Tx a larger
- * budget and be more aggressive about cleaning up the Tx descriptors.
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- for (i = 0; i < q_vector->num_ringpairs; i++) {
- clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
- vsi->work_limit);
- clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
- budget_per_ring);
- }
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
if (!clean_complete)
@@ -1117,7 +1590,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
qval = rd32(hw, I40E_QINT_TQCTL(0));
qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(vsi->back);
}
}
@@ -1144,17 +1618,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct tcphdr *th;
unsigned int hlen;
u32 flex_ptype, dtype_cmd;
+ u16 i;
/* make sure ATR is enabled */
- if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
/* if sampling is disabled do nothing */
if (!tx_ring->atr_sample_rate)
return;
- tx_ring->atr_count++;
-
/* snag network header to get L4 type and address */
hdr.network = skb_network_header(skb);
@@ -1176,17 +1649,27 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
th = (struct tcphdr *)(hdr.network + hlen);
- /* sample on all syn/fin packets or once every atr sample rate */
- if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ /* Due to lack of space, no more new filters can be programmed */
+ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ return;
+
+ tx_ring->atr_count++;
+
+ /* sample on all syn/fin/rst packets or once every atr sample rate */
+ if (!th->fin &&
+ !th->syn &&
+ !th->rst &&
+ (tx_ring->atr_count < tx_ring->atr_sample_rate))
return;
tx_ring->atr_count = 0;
/* grab the next descriptor */
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1200,7 +1683,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
- dtype_cmd |= th->fin ?
+ dtype_cmd |= (th->fin || th->rst) ?
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
(I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
@@ -1212,11 +1695,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |=
+ ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
* i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
@@ -1241,7 +1728,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -1261,9 +1748,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
struct vlan_ethhdr *vhdr;
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
- return -ENOMEM;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
vhdr = (struct vlan_ethhdr *)skb->data;
vhdr->h_vlan_TCI = htons(tx_flags >>
I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1276,27 +1765,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
}
/**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring: ptr to the ring to send
- * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol)
-{
- if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
- !(tx_flags & I40E_TX_FLAGS_TXSW)) {
- if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
- return false;
- }
-
- return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
-/**
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
@@ -1312,22 +1780,20 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
+ struct ipv6hdr *ipv6h;
struct tcphdr *tcph;
struct iphdr *iph;
u32 l4len;
int err;
- struct ipv6hdr *ipv6h;
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
@@ -1353,10 +1819,47 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
- | ((u64)cd_tso_len
- << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
- | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tsyn - set up the tsyn context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ *
+ * Returns 0 if no Tx timestamp will be taken and 1 if one will be
+ **/
+static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, u64 *cd_type_cmd_tso_mss)
+{
+ struct i40e_pf *pf;
+
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return 0;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (tx_flags & I40E_TX_FLAGS_TSO)
+ return 0;
+
+ /* only timestamp the outbound packet if the user has requested it and
+ * we are not already transmitting a packet to be timestamped
+ */
+ pf = i40e_netdev_to_pf(tx_ring->netdev);
+ if (pf->ptp_tx && !pf->ptp_tx_skb) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ pf->ptp_tx_skb = skb_get(skb);
+ } else {
+ return 0;
+ }
+
+ *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT;
+
return 1;
}
@@ -1482,15 +1985,17 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
const u32 cd_tunneling, const u32 cd_l2tag2)
{
struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
- if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
return;
/* grab the next descriptor */
- context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
- tx_ring->next_to_use++;
- if (tx_ring->next_to_use == tx_ring->count)
- tx_ring->next_to_use = 0;
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* cpu_to_le32 and assign to struct fields */
context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +2017,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct device *dev = tx_ring->dev;
- u32 paylen = skb->len - hdr_len;
- u16 i = tx_ring->next_to_use;
+ struct skb_frag_struct *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
- u32 buf_offset = 0;
+ u16 i = tx_ring->next_to_use;
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
-
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
I40E_TX_FLAGS_VLAN_SHIFT;
}
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
tx_desc = I40E_TX_DESC(tx_ring, i);
- for (;;) {
- while (size > I40E_MAX_DATA_PER_TXD) {
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
I40E_MAX_DATA_PER_TXD, td_tag);
- buf_offset += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
-
tx_desc++;
i++;
if (i == tx_ring->count) {
tx_desc = I40E_TX_DESC(tx_ring, 0);
i = 0;
}
- }
- tx_bi = &tx_ring->tx_bi[i];
- tx_bi->length = buf_offset + size;
- tx_bi->tx_flags = tx_flags;
- tx_bi->dma = dma;
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
- tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
- tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
- size, td_tag);
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
if (likely(!data_len))
break;
- size = skb_frag_size(frag);
- data_len -= size;
- buf_offset = 0;
- tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_error;
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
tx_desc++;
i++;
@@ -1582,31 +2090,39 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- frag++;
- }
-
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+ size = skb_frag_size(frag);
+ data_len -= size;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
- tx_ring->next_to_use = i;
+ tx_bi = &tx_ring->tx_bi[i];
+ }
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
- gso_segs = skb_shinfo(skb)->gso_segs;
- else
- gso_segs = 1;
+ /* Place RS bit on last descriptor of any packet that spans across the
+ * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+ */
+#define WB_STRIDE 0x3
+ if (((i & WB_STRIDE) != WB_STRIDE) &&
+ (first <= &tx_ring->tx_bi[i]) &&
+ (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ } else {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
- /* multiply data chunks by size of headers */
- tx_bi->bytecount = paylen + (gso_segs * hdr_len);
- tx_bi->gso_segs = gso_segs;
- tx_bi->skb = skb;
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
- /* set the timestamp and next to watch values */
+ /* set the timestamp */
first->time_stamp = jiffies;
- first->next_to_watch = tx_desc;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -1615,16 +2131,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
*/
wmb();
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
writel(i, tx_ring->tail);
+
return;
dma_error:
- dev_info(dev, "TX DMA map failed\n");
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
/* clear dma mappings for failed tx_bi map */
for (;;) {
tx_bi = &tx_ring->tx_bi[i];
- i40e_unmap_tx_resource(tx_ring, tx_bi);
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
if (tx_bi == first)
break;
if (i == 0)
@@ -1632,8 +2159,6 @@ dma_error:
i--;
}
- dev_kfree_skb_any(skb);
-
tx_ring->next_to_use = i;
}
@@ -1647,6 +2172,7 @@ dma_error:
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
smp_mb();
/* Check again in a case another CPU has just made room available. */
@@ -1685,25 +2211,20 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 2 desc gap to keep tail from touching head,
+ * + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
@@ -1728,6 +2249,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
+ int tsyn;
int tso;
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
@@ -1743,9 +2265,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first = &tx_ring->tx_bi[tx_ring->next_to_use];
/* setup IPv4/IPv6 offloads */
- if (protocol == __constant_htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == __constant_htons(ETH_P_IPV6))
+ else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
@@ -1758,16 +2280,21 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
- /* Always offload the checksum, since it's in the data descriptor */
- if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
- tx_flags |= I40E_TX_FLAGS_CSUM;
+ tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
+
+ if (tsyn)
+ tx_flags |= I40E_TX_FLAGS_TSYN;
- /* always enable offload insertion */
+ /* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
- if (tx_flags & I40E_TX_FLAGS_CSUM)
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
+ }
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
@@ -1801,7 +2328,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+ struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
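The descriptor budgeting done by i40e_xmit_descriptor_count() above is plain arithmetic and can be mirrored outside the driver. A standalone sketch; the names are made up, while the 8192-byte per-descriptor limit and the + 4 + 1 slack follow the hunks above:

#include <stddef.h>

#define DEMO_MAX_DATA_PER_TXD	8192	/* mirrors I40E_MAX_DATA_PER_TXD */
#define DEMO_TXD_USE_COUNT(s)	(((s) + DEMO_MAX_DATA_PER_TXD - 1) / \
				 DEMO_MAX_DATA_PER_TXD)

static size_t demo_txd_needed(size_t headlen, const size_t *frag_len,
			      unsigned int nr_frags)
{
	size_t count = DEMO_TXD_USE_COUNT(headlen);	/* linear skb data */
	unsigned int f;

	for (f = 0; f < nr_frags; f++)			/* each page fragment */
		count += DEMO_TXD_USE_COUNT(frag_len[f]);

	/* + 4 descriptor gap so the tail never lands in the cache line
	 * holding head, + 1 for a possible context descriptor
	 */
	return count + 4 + 1;
}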
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b1d7722d98a..0277894fe1c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -25,11 +24,13 @@
*
******************************************************************************/
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
-#define I40E_MAX_ITR 0x07FF
-#define I40E_MIN_ITR 0x0001
-#define I40E_ITR_USEC_RESOLUTION 2
+/* Interrupt Throttling and Rate Limiting Goodies */
+
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
#define I40E_MAX_IRATE 0x03F
#define I40E_MIN_IRATE 0x001
#define I40E_IRATE_USEC_RESOLUTION 4
@@ -49,10 +50,38 @@
#define I40E_QUEUE_END_OF_LIST 0x7FF
-#define I40E_ITR_NONE 3
-#define I40E_RX_ITR 0
-#define I40E_TX_ITR 1
-#define I40E_PE_ITR 2
+/* This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
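I40E_DEFAULT_RSS_HENA collects one enable bit per packet classifier type into a u64. A hedged sketch of how such a mask is typically programmed; the I40E_PFQF_HENA register pair and the 32-bit split are assumptions, not shown in this patch:

static void demo_enable_default_rss(struct i40e_hw *hw)
{
	u64 hena = I40E_DEFAULT_RSS_HENA;

	/* assumed pattern: the 64-bit mask spans a pair of 32-bit
	 * hash-enable registers
	 */
	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}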
/* Supported Rx Buffer Sizes */
#define I40E_RXBUFFER_512 512 /* Used for packet split */
#define I40E_RXBUFFER_2048 2048
@@ -88,11 +117,11 @@
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -102,23 +131,21 @@
#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
+#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16
struct i40e_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
- u16 length;
- u32 tx_flags;
struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
unsigned int bytecount;
- u16 gso_segs;
- u8 mapped_as_page;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct i40e_rx_buffer {
@@ -129,21 +156,21 @@ struct i40e_rx_buffer {
unsigned int page_offset;
};
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
u64 packets;
u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
- u64 completed;
u64 tx_done_old;
};
struct i40e_rx_queue_stats {
- u64 packets;
- u64 bytes;
u64 non_eop_descs;
- u64 alloc_rx_page_failed;
- u64 alloc_rx_buff_failed;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
};
enum i40e_ring_state_t {
@@ -152,7 +179,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -168,12 +194,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
@@ -183,6 +203,7 @@ enum i40e_ring_state_t {
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -216,9 +237,13 @@ struct i40e_ring {
u8 atr_sample_rate;
u8 atr_count;
+ unsigned long last_rx_timestamp;
+
bool ring_active; /* is ring online or not */
/* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
union {
struct i40e_tx_queue_stats tx_stats;
struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +254,8 @@ struct i40e_ring {
struct i40e_vsi *vsi; /* Backreference to associated VSI */
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -238,9 +265,8 @@ enum i40e_latency_range {
};
struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
/* array of pointers to rings */
- struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ struct i40e_ring *ring;
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 count;
@@ -248,6 +274,10 @@ struct i40e_ring_container {
u16 itr;
};
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -257,3 +287,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
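The per-ring packet/byte counters now sit under a u64_stats_sync, but the matching read side is not part of this diff. A hedged sketch of how such counters are typically snapshotted, compiled against the header above; the driver itself may use a _bh/_irq variant of the fetch helpers depending on kernel version:

#include <linux/u64_stats_sync.h>

static void demo_read_ring_stats(struct i40e_ring *ring,
				 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}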
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index f3f22b20f02..9d39ff23c5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -36,47 +35,36 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_SFP_XL710_DEVICE_ID 0x1572
-#define I40E_SFP_X710_DEVICE_ID 0x1573
-#define I40E_QEMU_DEVICE_ID 0x1574
-#define I40E_KX_A_DEVICE_ID 0x157F
-#define I40E_KX_B_DEVICE_ID 0x1580
-#define I40E_KX_C_DEVICE_ID 0x1581
-#define I40E_KX_D_DEVICE_ID 0x1582
-#define I40E_QSFP_A_DEVICE_ID 0x1583
-#define I40E_QSFP_B_DEVICE_ID 0x1584
-#define I40E_QSFP_C_DEVICE_ID 0x1585
-#define I40E_VF_DEVICE_ID 0x154C
-#define I40E_VF_HV_DEVICE_ID 0x1571
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0000
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Check whether address is multicast. This is little-endian specific check.*/
-#define I40E_IS_MULTICAST(address) \
- (bool)(((u8 *)(address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define I40E_IS_BROADCAST(address) \
- ((((u8 *)(address))[0] == ((u8)0xff)) && \
- (((u8 *)(address))[1] == ((u8)0xff)))
-
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
/* forward declaration */
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define I40E_ETH_LENGTH_OF_ADDRESS 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
@@ -85,9 +73,10 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-/* debug masks */
+/* debug masks - set these bits in hw->debug_mask to control output */
enum i40e_debug_mask {
I40E_DEBUG_INIT = 0x00000001,
I40E_DEBUG_RELEASE = 0x00000002,
@@ -100,11 +89,12 @@ enum i40e_debug_mask {
I40E_DEBUG_FLOW = 0x00000200,
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
- I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
I40E_DEBUG_AQ = 0x0F000000,
I40E_DEBUG_USER = 0xF0000000,
@@ -134,6 +124,7 @@ enum i40e_media_type {
I40E_MEDIA_TYPE_BASET,
I40E_MEDIA_TYPE_BACKPLANE,
I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
I40E_MEDIA_TYPE_VIRTUAL
};
@@ -171,8 +162,12 @@ struct i40e_link_status {
u8 link_info;
u8 an_info;
u8 ext_info;
+ u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -236,9 +231,9 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
u16 max_fcoeq;
};
@@ -415,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -465,6 +461,10 @@ union i40e_32byte_rx_desc {
union {
__le32 rss; /* RSS Hash */
__le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
} hi_dword;
} qword0;
struct {
@@ -490,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -500,18 +497,31 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
- I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
- I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
enum i40e_rx_desc_fltstat_values {
I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
@@ -531,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -547,28 +558,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
/* Packet type non-ip values */
enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
};
struct i40e_rx_ptype_decoded {
@@ -648,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -693,7 +707,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
enum i40e_rx_prog_status_desc_error_bits {
/* Note: These are predefined bit offsets */
I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
- I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
};
@@ -852,22 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Value 0-25 are reserved for future use */
- I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
- I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
- I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Value 37 is reserved for future use */
- I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -877,7 +883,8 @@ enum i40e_filter_pctype {
/* Note: Value 47 is reserved for future use */
I40E_FILTER_PCTYPE_FCOE_OX = 48,
I40E_FILTER_PCTYPE_FCOE_RX = 49,
- /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
};
@@ -948,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -955,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1008,12 +1023,21 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
};
/* Checksum and Shadow RAM pointers */
#define I40E_SR_NVM_CONTROL_WORD 0x00
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
#define I40E_SR_NVM_EETRACK_HI 0x2E
@@ -1138,17 +1162,4 @@ enum i40e_reset_type {
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
-
-/* IEEE 802.1AB LLDP Agent Variables from NVM */
-#define I40E_NVM_LLDP_CFG_PTR 0xF
-struct i40e_lldp_variables {
- u16 length;
- u16 adminstatus;
- u16 msgfasttx;
- u16 msgtxinterval;
- u16 txparams;
- u16 timers;
- u16 crc8;
-};
-
#endif /* _I40E_TYPE_H_ */
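The new I40E_RXD_QW1_STATUS_MASK is derived from the I40E_RX_DESC_STATUS_LAST sentinel rather than a hard-coded constant, so it tracks the enum as status bits are added. A tiny standalone check of that arithmetic (userspace C, values copied from the enum above):

#include <assert.h>

int main(void)
{
	enum { DEMO_STATUS_UDP_0 = 18, DEMO_STATUS_LAST = 19 };
	unsigned int mask = (1u << DEMO_STATUS_LAST) - 1;

	assert(mask == 0x7FFFF);			/* bits 0..18 */
	assert(mask & (1u << DEMO_STATUS_UDP_0));	/* newest bit covered */
	assert(!(mask & (1u << DEMO_STATUS_LAST)));	/* nothing beyond LAST */
	return 0;
}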
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index cc6654f1dac..70951d2edca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
- u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 default_mac_addr[ETH_ALEN];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
@@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {
*/
struct i40e_virtchnl_ether_addr {
- u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
u8 pad[2];
};
@@ -342,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
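The i40e_virtchnl_pf.c hunks below replace open-coded find_first_bit()/find_next_bit() walks with for_each_set_bit(). A minimal sketch of that idiom with made-up names, for reference while reading those hunks:

#include <linux/bitops.h>

#define DEMO_MAX_QP	16

static unsigned long demo_build_linklist(unsigned long rxq_map)
{
	unsigned long linklistmap = 0;
	u16 qid;

	/* visits only the set bits, in ascending order */
	for_each_set_bit(qid, &rxq_map, DEMO_MAX_QP)
		linklistmap |= BIT(2 * qid);	/* two queue types per pair */

	return linklistmap;
}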
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8967e58e240..f5b9d206257 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -30,6 +29,24 @@
/***********************misc routines*****************************/
/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+}
+
+/**
* i40e_vc_isvalid_vsi_id
* @vf: pointer to the vf info
* @vsi_id: vf relative vsi id
@@ -102,130 +119,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
}
/**
- * i40e_ctrl_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
- * i40e_ctrl_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
- * @vsi_queue_id: vsi relative queue index
- * @ctrl: control flags
- *
- * enable/disable/enable check/disable check
- **/
-static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
- u16 vsi_queue_id,
- enum i40e_queue_ctrl ctrl)
-{
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- bool writeback = false;
- u16 pf_queue_id;
- int ret = 0;
- u32 reg;
-
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
- reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
-
- switch (ctrl) {
- case I40E_QUEUE_CTRL_ENABLE:
- reg |= I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_ENABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
- break;
- case I40E_QUEUE_CTRL_DISABLE:
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_DISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLE:
- reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- break;
- case I40E_QUEUE_CTRL_FASTDISABLECHECK:
- ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
- if (!ret) {
- reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
- writeback = true;
- }
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (writeback) {
- wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
- i40e_flush(hw);
- }
-
- return ret;
-}
-
-/**
* i40e_config_irq_link_list
* @vf: pointer to the vf info
* @vsi_idx: index of VSI in PF struct
@@ -251,8 +144,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else
reg_idx = I40E_VPINT_LNKLSTN(
- ((pf->hw.func_caps.num_msix_vectors_vf - 1)
- * vf->vf_id) + (vector_id - 1));
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
+ (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
/* Special case - No queues mapped on this vector */
@@ -260,23 +153,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
goto irq_list_done;
}
tempmap = vecmap->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES *
vsi_queue_id));
- vsi_queue_id =
- find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
}
tempmap = vecmap->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
linklistmap |= (1 <<
(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ 1));
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
next_q = find_first_bit(&linklistmap,
@@ -307,7 +194,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP *
I40E_VIRTCHNL_SUPPORTED_QTYPES),
next_q + 1);
- if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ if (next_q <
+ (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
@@ -360,6 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
+ tx_ctx.head_wb_ena = info->headwb_enabled;
+ tx_ctx.head_wb_addr = info->dma_headwb_addr;
/* clear the context in the HMC */
ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -383,7 +273,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
- qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
& I40E_QTX_CTL_PF_INDX_MASK);
qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
<< I40E_QTX_CTL_VFVM_INDX_SHIFT)
@@ -463,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
rx_ctx.tphhead_ena = 1;
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -499,7 +390,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
struct i40e_vsi *vsi;
int ret = 0;
@@ -513,167 +403,47 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
vf->lan_vsi_index = vsi->idx;
vf->lan_vsi_id = vsi->id;
dev_info(&pf->pdev->dev,
- "LAN VSI index %d, VSI id %d\n",
- vsi->idx, vsi->id);
+ "VF %d assigned LAN VSI index %d, VSI id %d\n",
+ vf->vf_id, vsi->idx, vsi->id);
+ /* If the port VLAN has been configured and then the
+ * VF driver was removed, the VSI port VLAN
+ * configuration was destroyed. Check if there is
+ * a port VLAN and restore the VSI configuration if
+ * needed.
+ */
+ if (vf->port_vlan_id)
+ i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
- 0, true, false);
- }
- if (!f) {
- dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
- ret = -ENOMEM;
- goto error_alloc_vsi_res;
+ vf->port_vlan_id, true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF MAC addr\n");
+ f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+ true, false);
+ if (!f)
+ dev_info(&pf->pdev->dev,
+ "Could not allocate VF broadcast filter\n");
}
/* program mac filter */
ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
+ if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
- goto error_alloc_vsi_res;
- }
-
- /* accept bcast pkts. by default */
- ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
- vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
- ret = -EINVAL;
- }
-
-error_alloc_vsi_res:
- return ret;
-}
-
-/**
- * i40e_reset_vf
- * @vf: pointer to the vf structure
- * @flr: VFLR was issued or not
- *
- * reset the vf
- **/
-int i40e_reset_vf(struct i40e_vf *vf, bool flr)
-{
- int ret = -ENOENT;
- struct i40e_pf *pf = vf->pf;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, reg_idx, msix_vf;
- bool rsd = false;
- u16 pf_queue_id;
- int i, j;
-
- /* warn the VF */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
-
- clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
- /* PF triggers VFR only when VF requests, in case of
- * VFLR, HW triggers VFR
- */
- if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
- reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- i40e_flush(hw);
- }
-
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 4; i++) {
- /* vf reset requires driver to first reset the
- * vf & than poll the status register to make sure
- * that the requested op was completed
- * successfully
- */
- udelay(10);
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
- rsd = true;
- break;
- }
- }
-
- if (!rsd)
- dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
- vf->vf_id);
-
- /* fast disable qps */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLE);
- }
-
- /* Queue enable/disable requires driver to
- * first reset the vf & than poll the status register
- * to make sure that the requested op was completed
- * successfully
- */
- udelay(10);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ vf->tx_rate / 50, 0, NULL);
if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
- ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
- I40E_QUEUE_CTRL_FASTDISABLECHECK);
- if (ret)
- dev_info(&pf->pdev->dev,
- "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
- vf->lan_vsi_index, j, vf->vf_id);
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
}
- /* clear the irq settings */
- msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
- else
- reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- wr32(hw, reg_idx, reg);
- i40e_flush(hw);
- }
- /* disable interrupts so the VF starts in a known state */
- for (i = 0; i < msix_vf; i++) {
- /* format is same for both registers */
- if (0 == i)
- reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
- else
- reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
- (vf->vf_id))
- + (i - 1));
- wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- i40e_flush(hw);
- }
-
- /* set the defaults for the rqctl & tqctl registers */
- reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
- I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
- wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
- wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
- }
-
- /* clear the reset bit in the VPGEN_VFRTRIG reg */
- reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
- /* tell the VF the reset is done */
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
- i40e_flush(hw);
-
+error_alloc_vsi_res:
return ret;
}
@@ -756,6 +526,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
static void i40e_free_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_idx, reg;
+ int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
if (vf->lan_vsi_index) {
@@ -763,6 +536,35 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_index = 0;
vf->lan_vsi_id = 0;
}
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (i == 0)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* clear the irq settings */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (i == 0)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
 /* reset some of the state variables keeping
* track of the resources
*/
@@ -804,6 +606,111 @@ error_alloc:
return ret;
}
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_MASK 0x20
+/**
+ * i40e_quiesce_vf_pci
+ * @vf: pointer to the vf structure
+ *
+ * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
+ * if the transactions never clear.
+ **/
+static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int vf_abs_id, i;
+ u32 reg;
+
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ wr32(hw, I40E_PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < 100; i++) {
+ reg = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((reg & VF_TRANS_PENDING_MASK) == 0)
+ return 0;
+ udelay(1);
+ }
+ return -EIO;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+void i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool rsd = false;
+ int i;
+ u32 reg;
+
+ /* warn the VF */
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* In the case of a VFLR, the HW has already reset the VF and we
+ * just need to clean up, so don't hit the VFRTRIG register.
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ if (i40e_quiesce_vf_pci(vf))
+ dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
+ vf->vf_id);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 100; i++) {
+ /* vf reset requires the driver to first reset the vf and
+ * then poll the status register to make sure that the
+ * requested op completed successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
+ vf->vf_id);
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+
+ /* On initial reset, we won't have any queues */
+ if (vf->lan_vsi_index == 0)
+ goto complete_reset;
+
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+complete_reset:
+ /* reallocate vf resources to reset the VSI state */
+ i40e_free_vf_res(vf);
+ i40e_alloc_vf_res(vf);
+ i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+ i40e_flush(hw);
+}
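
The VPGEN_VFRSTAT wait above is a bounded-poll pattern: trigger, delay, re-read, give up after a fixed budget. A minimal sketch of that loop factored into a helper, assuming the same register, mask, and 100 x 10us budget as the code above (the helper name itself is hypothetical):

static bool i40e_vf_reset_done(struct i40e_hw *hw, int vf_id)
{
	int i;

	/* HW sets VFRD in VPGEN_VFRSTAT once the VF reset completes */
	for (i = 0; i < 100; i++) {
		u32 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf_id));

		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			return true;
		udelay(10);
	}
	return false;
}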
+
/**
* i40e_vfs_are_assigned
* @pf: pointer to the pf structure
@@ -816,7 +723,7 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
struct pci_dev *vfdev;
/* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF, NULL);
while (vfdev) {
/* if we don't own it we don't care */
if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
@@ -826,12 +733,82 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
}
vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- I40E_VF_DEVICE_ID,
+ I40E_DEV_ID_VF,
vfdev);
}
return false;
}
+#ifdef CONFIG_PCI_IOV
+
+/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+#endif
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the pf structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get pf vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
/**
* i40e_free_vfs
@@ -842,17 +819,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
void i40e_free_vfs(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int i;
+ u32 reg_idx, bit_idx;
+ int i, tmp, vf_id;
if (!pf->vf)
return;
/* Disable interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, I40E_PFINT_DYN_CTL0, 0);
- i40e_flush(hw);
+ i40e_irq_dynamic_disable_icr0(pf);
+ mdelay(10); /* let any messages in transit get finished up */
/* free up vf resources */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
+ tmp = pf->num_alloc_vfs;
+ pf->num_alloc_vfs = 0;
+ for (i = 0; i < tmp; i++) {
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_free_vf_res(&pf->vf[i]);
/* disable qp mappings */
@@ -861,20 +841,29 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
- pf->num_alloc_vfs = 0;
- if (!i40e_vfs_are_assigned(pf))
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
+ if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
- else
+ /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
+ * work correctly when SR-IOV gets re-enabled.
+ */
+ for (vf_id = 0; vf_id < tmp; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+ }
+ i40e_disable_pf_switch_lb(pf);
+ } else {
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
+ }
/* Re-enable interrupt 0. */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
- i40e_flush(hw);
+ i40e_irq_dynamic_enable_icr0(pf);
}
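
The VFLR acknowledgment above treats GLGEN_VFLRSTAT as an array of 32-bit bitmaps indexed by absolute VF id. A worked example of the index math, with illustrative values:

/* say vf_base_id = 64 and vf_id = 5: absolute VF id 69 lands in
 * register 69 / 32 = 2, bit 69 % 32 = 5
 */
u32 abs_vf_id = hw->func_caps.vf_base_id + vf_id;	/* 69 */
u32 reg_idx = abs_vf_id / 32;				/* 2 */
u32 bit_idx = abs_vf_id % 32;				/* 5 */

wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));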
#ifdef CONFIG_PCI_IOV
@@ -885,25 +874,31 @@ void i40e_free_vfs(struct i40e_pf *pf)
*
* allocate vf resources
**/
-static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
struct i40e_vf *vfs;
int i, ret = 0;
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "pci_enable_sriov failed with error %d!\n", ret);
- pf->num_alloc_vfs = 0;
- goto err_iov;
- }
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ i40e_irq_dynamic_disable_icr0(pf);
+ /* Check to see if we're just allocating resources for extant VFs */
+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to enable SR-IOV, error %d.\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
+ }
/* allocate memory */
- vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+ vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
ret = -ENOMEM;
goto err_alloc;
}
+ pf->vf = vfs;
/* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) {
@@ -913,22 +908,22 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
-
- ret = i40e_alloc_vf_res(&vfs[i]);
- i40e_reset_vf(&vfs[i], true);
- if (ret)
- break;
+ vfs[i].spoofchk = true;
+ /* vf resources get allocated during reset */
+ i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
- pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
+ i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
err_iov:
+ /* Re-enable interrupt 0. */
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -988,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
return i40e_pci_sriov_enable(pdev, num_vfs);
- i40e_free_vfs(pf);
+ if (!i40e_vfs_are_assigned(pf)) {
+ i40e_free_vfs(pf);
+ } else {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -1009,6 +1009,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
@@ -1028,8 +1029,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
- aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
+ aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"Unable to send the message to VF %d aq_err %d\n",
@@ -1144,12 +1145,10 @@ err:
* unlike other virtchnl messages, pf driver
* doesn't send the response back to the vf
**/
-static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return -ENOENT;
-
- return i40e_reset_vf(vf, false);
+ if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ i40e_reset_vf(vf, false);
}
/**
@@ -1291,27 +1290,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* lookout for the invalid queue index */
tempmap = map->rxq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
tempmap = map->txq_map;
- vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- vsi_queue_id + 1);
}
i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1337,8 +1330,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1354,66 +1345,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_ENABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -1436,8 +1369,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
- u16 queue_id;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1453,65 +1384,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLE);
-
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- /* Poll the status register to make sure that the
- * requested op was completed successfully
- */
- udelay(10);
-
- tempmap = vqs->rx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on RX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
-
- tempmap = vqs->tx_queues;
- queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
- while (queue_id < I40E_MAX_VSI_QP) {
- if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
- I40E_QUEUE_CTRL_DISABLECHECK)) {
- dev_err(&pf->pdev->dev,
- "Queue control check failed on TX queue %d of VSI %d VF %d\n",
- queue_id, vsi_id, vf->vf_id);
- }
- queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
- queue_id + 1);
- }
+ if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+ aq_ret = I40E_ERR_TIMEOUT;
error_param:
/* send the response to the vf */
@@ -1554,7 +1428,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
i40e_update_eth_stats(vsi);
- memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ stats = vsi->eth_stats;
error_param:
/* send the response back to the vf */
@@ -1563,6 +1437,40 @@ error_param:
}
/**
+ * i40e_check_vf_permission
+ * @vf: pointer to the vf info
+ * @macaddr: pointer to the MAC Address being checked
+ *
+ * Check if the VF has permission to add or delete unicast MAC address
+ * filters and return error code -EPERM if not. Then check if the
+ * address filter requested is broadcast or zero and if so return
+ * an invalid MAC address error code.
+ **/
+static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
+{
+ struct i40e_pf *pf = vf->pf;
+ int ret = 0;
+
+ if (is_broadcast_ether_addr(macaddr) ||
+ is_zero_ether_addr(macaddr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
+ /* If the host VMM administrator has set the VF MAC address
+ * administratively via the ndo_set_vf_mac command then deny
+ * permission to the VF to add or delete unicast MAC addresses.
+ * The VF may request to set the MAC address filter already
+ * assigned to it, so do not return an error in that case.
+ */
+ dev_err(&pf->pdev->dev,
+ "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ }
+ return ret;
+}
+
+/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1577,24 +1485,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
- if (is_broadcast_ether_addr(al->list[i].addr) ||
- is_zero_ether_addr(al->list[i].addr)) {
- dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
- al->list[i].addr);
- aq_ret = I40E_ERR_PARAM;
+ ret = i40e_check_vf_permission(vf, al->list[i].addr);
+ if (ret)
goto error_param;
- }
}
vsi = pf->vsi[vsi_id];
@@ -1603,7 +1507,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false);
- if (f) {
+ if (!f) {
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
true, false);
@@ -1615,7 +1519,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
}
@@ -1627,7 +1531,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1645,15 +1549,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
- i40e_status aq_ret = 0;
+ i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = I40E_ERR_PARAM;
+ ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ al->list[i].addr);
+ ret = I40E_ERR_INVALID_MAC_ADDR;
+ goto error_param;
+ }
+ }
vsi = pf->vsi[vsi_id];
/* delete addresses from the list */
@@ -1668,7 +1582,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- aq_ret);
+ ret);
}
/**
@@ -1777,30 +1691,6 @@ error_param:
}
/**
- * i40e_vc_fcoe_msg
- * @vf: pointer to the vf info
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * called from the vf for the fcoe msgs
- **/
-static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
-{
- i40e_status aq_ret = 0;
-
- if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
- aq_ret = I40E_ERR_NOT_IMPLEMENTED;
-
-error_param:
- /* send the response to the vf */
- return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
-}
-
-/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the vf info
* @msg: pointer to the msg buffer
@@ -1920,19 +1810,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
- struct i40e_vf *vf = &(pf->vf[vf_id]);
struct i40e_hw *hw = &pf->hw;
+ unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ struct i40e_vf *vf;
int ret;
pf->vf_aq_requests++;
+ if (local_vf_id >= pf->num_alloc_vfs)
+ return -EINVAL;
+ vf = &(pf->vf[local_vf_id]);
/* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ local_vf_id, v_opcode, msglen);
return ret;
}
- wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
@@ -1941,7 +1836,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_get_vf_resources_msg(vf);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- ret = i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf_msg(vf);
+ ret = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
@@ -1973,13 +1869,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
- case I40E_VIRTCHNL_OP_FCOE:
- ret = i40e_vc_fcoe_msg(vf, msg, msglen);
- break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev,
- "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -2015,19 +1908,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- if (i40e_reset_vf(vf, true))
- dev_err(&pf->pdev->dev,
- "Unable to reset the VF %d\n", vf_id);
- /* free up vf resources to destroy vsi state */
- i40e_free_vf_res(vf);
-
- /* allocate new vf resources with the default state */
- if (i40e_alloc_vf_res(vf))
- dev_err(&pf->pdev->dev,
- "Unable to allocate VF resources %d\n",
- vf_id);
-
- i40e_enable_vf_mappings(vf);
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ i40e_reset_vf(vf, true);
}
}
@@ -2078,15 +1960,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
struct i40e_virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int i;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
- pfe.event_data.link_event.link_status =
- pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
-
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (vf->link_forced) {
+ pfe.event_data.link_event.link_status = vf->link_up;
+ pfe.event_data.link_event.link_speed =
+ (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ } else {
+ pfe.event_data.link_event.link_status =
+ ls->link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = ls->link_speed;
+ }
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+ vf++;
+ }
}
/**
@@ -2164,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ true, false);
- /* add the new mac address */
- f = i40e_add_filter(vsi, mac, 0, true, false);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add VF ucast filter\n");
- ret = -ENOMEM;
- goto error_param;
- }
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2182,7 +2075,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EIO;
goto error_param;
}
- memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ ether_addr_copy(vf->default_lan_addr.addr, mac);
+ vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2229,6 +2123,30 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
+ vf_id);
+ /* Administrator error - knock the VF offline until the
+ * administrator reconfigures the network correctly and
+ * reloads the VF driver.
+ */
+ i40e_vc_disable_vf(pf, vf);
+ }
+
+ /* Check for condition where there was already a port VLAN ID
+ * filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
+ * Before deleting all the old VLAN filters we must add new ones
+ * with -1 (I40E_VLAN_ANY), otherwise we're left with all our
+ * MAC addresses deleted.
+ */
+ if ((!(vlan_id || qos) ||
+ (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid)
+ ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
+
if (vsi->info.pvid) {
/* kill old VLAN */
ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
@@ -2243,7 +2161,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
ret = i40e_vsi_add_pvid(vsi,
vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
else
- i40e_vlan_stripping_disable(vsi);
+ i40e_vsi_remove_pvid(vsi);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -2257,18 +2175,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
vsi->back->hw.aq.asq_last_status);
goto error_pvid;
}
+ /* Kill non-vlan MAC filters - ignore error return since
+ * there might not be any non-vlan MAC filters.
+ */
+ i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);
}
if (ret) {
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
+ /* The Port VLAN needs to be saved across resets the same as the
+ * default LAN MAC address.
+ */
+ vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
ret = 0;
error_pvid:
return ret;
}
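
The pvid written above packs the VLAN id and the 802.1p priority into one 16-bit field; i40e_ndo_get_vf_config() below unpacks it with the same masks. A short sketch of the encode/decode, assuming the usual 12-bit VLAN mask and 3-bit priority at I40E_VLAN_PRIORITY_SHIFT:

u16 pvid = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);	/* encode */

u16 vlan = pvid & I40E_VLAN_MASK;			/* VLAN id back out */
u8 prio = (pvid & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT;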
+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
@@ -2277,9 +2205,76 @@ error_pvid:
*
* configure vf tx rate
**/
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int speed = 0;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (min_tx_rate) {
+ dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+ max_tx_rate, vf->vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
+ dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = 50;
+ }
+
+ /* Tx rate credits are in increments of 50 Mbps; 0 disables the limit */
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
+ ret);
+ ret = -EIO;
+ goto error;
+ }
+ vf->tx_rate = max_tx_rate;
+error:
+ return ret;
}
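
Since each bandwidth credit is 50 Mbps, the division above truncates the requested rate down to the nearest credit boundary. Worked through with sample numbers (arithmetic only, mirroring the conversion in the function):

/* 120 Mbps requested -> 120 / 50 = 2 credits -> 100 Mbps enforced.
 * Requests in (0, 50) are first bumped to the 50 Mbps minimum,
 * and 0 credits leaves the limiter disabled entirely.
 */
int credits = max_tx_rate / I40E_BW_CREDIT_DIVISOR;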
/**
@@ -2294,7 +2289,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_vf *vf;
@@ -2318,18 +2312,130 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
- /* first entry of the list is the default ethernet address */
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
- break;
- }
+ memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
I40E_VLAN_PRIORITY_SHIFT;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->spoofchk = vf->spoofchk;
ret = 0;
error_param:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_virtchnl_pf_event pfe;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+
+ switch (link) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ pfe.event_data.link_event.link_status =
+ pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed =
+ pf->hw.phy.link_info.link_speed;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ pfe.event_data.link_event.link_status = true;
+ pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error_out;
+ }
+ /* Notify the VF of its new link state */
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vf = &(pf->vf[vf_id]);
+
+ if (enable == vf->spoofchk)
+ goto out;
+
+ vf->spoofchk = enable;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (enable)
+ ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+ ret);
+ ret = -EIO;
+ }
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 360382cf304..63e7e0d81ad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -82,6 +81,8 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
+ u16 port_vlan_id;
+ bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -97,23 +98,32 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ bool link_forced;
+ bool link_up; /* only valid if vf link is forced */
+ bool spoofchk;
};
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
-int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* vf configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
+
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
new file mode 100644
index 00000000000..3a423836a56
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -0,0 +1,36 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+# Copyright(c) 2013 - 2014 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 40GbE VF driver
+#
+
+obj-$(CONFIG_I40EVF) += i40evf.o
+
+i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \
+ i40e_txrx.o i40e_common.o i40e_adminq.o
+
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
new file mode 100644
index 00000000000..eb67cce3e8f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -0,0 +1,978 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.asq.len = I40E_VF_ATQLEN1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ hw->aq.arq.len = I40E_VF_ARQLEN1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.asq.len = I40E_PF_ATQLEN;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ hw->aq.arq.len = I40E_PF_ARQLEN;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* In accordance with the Admin Queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
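
Each descriptor carries its buffer's 64-bit DMA address split across two little-endian 32-bit fields, as done above with upper_32_bits()/lower_32_bits(). With a sample address:

/* bi->pa = 0x0000000123456780:
 *   upper_32_bits() -> 0x00000001 (addr_high)
 *   lower_32_bits() -> 0x23456780 (addr_low)
 * each value is then converted to little endian by cpu_to_le32()
 */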
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQBAL1,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH,
+ upper_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQBAL,
+ lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
+ }
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQBAL1,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH,
+ upper_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQBAL,
+ lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40evf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40evf_init_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40evf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (i40evf_check_asq_alive(hw))
+ i40evf_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the locks */
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * returns the number of free desc
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40evf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40evf_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * i40evf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call the clean and check queue available function to reclaim the
+ * descriptors that were processed by FW; it returns the number of
+ * descriptors available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40evf_asq_done(hw))
+ break;
+ /* ugh! busy-wait while holding the mutex */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40evf_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
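+
+/* Illustrative usage sketch (not part of this patch): a direct command is
+ * typically built with i40evf_fill_default_direct_cmd_desc() and posted
+ * with i40evf_asq_send_command().  The queue shutdown opcode and structure
+ * from i40e_adminq_cmd.h are used only as an example here; "hw" and
+ * "unloading" are assumed to be in scope.
+ *
+ *	struct i40e_aq_desc desc;
+ *	struct i40e_aqc_queue_shutdown *cmd =
+ *		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ *	i40e_status status;
+ *
+ *	i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *	if (unloading)
+ *		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ *	status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */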
+
+/**
+ * i40evf_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40evf_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
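+
+/* Illustrative usage sketch (not part of this patch): a caller-side loop
+ * draining the ARQ with i40evf_clean_arq_element().  The buffer size and
+ * the surrounding context (hw in scope, process context) are assumptions;
+ * only the event structure and adminq return codes are relied on.  Note
+ * that msg_size is both input (buffer capacity) and output (actual event
+ * length), so it is reset on every iteration.
+ *
+ *	struct i40e_arq_event_info event;
+ *	u16 pending = 0;
+ *
+ *	event.msg_buf = kzalloc(hw->aq.arq_buf_size, GFP_KERNEL);
+ *	if (!event.msg_buf)
+ *		return;
+ *	do {
+ *		event.msg_size = hw->aq.arq_buf_size;
+ *		if (i40evf_clean_arq_element(hw, &event, &pending))
+ *			break;
+ *	} while (pending);
+ *	kfree(event.msg_buf);
+ */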
+
+void i40evf_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
new file mode 100644
index 00000000000..e3472c62e15
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -0,0 +1,110 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ struct i40e_virt_mem dma_head; /* space for dma structures */
+ struct i40e_dma_mem desc_buf; /* descriptor ring memory */
+ struct i40e_virt_mem cmd_buf; /* command buffer memory */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
new file mode 100644
index 00000000000..e656ea7a792
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -0,0 +1,2197 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
+#define I40E_FW_API_VERSION_A0_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
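+/* For example, an ARQ receive descriptor is re-armed with BUF (0x1000)
+ * set, plus LB (0x200) when its buffer is larger than I40E_AQ_LARGE_BUF,
+ * giving flags = 0x1200 for a large receive buffer (see
+ * i40evf_clean_arq_element() in i40e_adminq.c).
+ */
+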
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
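+
+/* As an illustration (comment only): for a command structure X,
+ * I40E_CHECK_CMD_LENGTH(X) expands to
+ *	enum i40e_static_assert_enum_X
+ *	{ i40e_static_assert_X = 16/((sizeof(struct X) == 16) ? 1 : 0) };
+ * so any structure whose size is not exactly 16 bytes produces a constant
+ * division by zero and the build fails.
+ */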
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+ u8 rx_cnt;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an seid and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer;
+ * the first entry in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
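+
+/* Illustrative sketch (not part of this patch): walking a Get Switch
+ * Configuration response.  "buf" is assumed to be the caller's buffer
+ * passed with the indirect command.
+ *
+ *	struct i40e_aqc_get_switch_config_resp *resp = buf;
+ *	u16 i, num = le16_to_cpu(resp->header.num_reported);
+ *
+ *	for (i = 0; i < num; i++) {
+ *		struct i40e_aqc_switch_config_element_resp *ele =
+ *						&resp->element[i];
+ *		u16 seid = le16_to_cpu(ele->seid);
+ *		u8 type = ele->element_type;
+ *	}
+ */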
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x0210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ * uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
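+
+/* Illustrative sketch (not part of this patch): marking the VLAN section
+ * valid and programming a port VLAN in this structure.  "info" is assumed
+ * to point to a caller-owned struct i40e_aqc_vsi_properties_data and "vid"
+ * to the VLAN id to insert.
+ *
+ *	info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ *	info->pvid = cpu_to_le16(vid);
+ *	info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ *				I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ *				I40E_AQ_VSI_PVLAN_EMOD_STR;
+ */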
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter data below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+
+ __le32 tenant_id;
+ u8 reserved[4];
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved2[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
+ u8 tc_strict_priority_flags;
+ u8 reserved1[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved2[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
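
The link_speed bytes reported elsewhere in this file (PHY abilities, Get Link Status) carry this bitmap. A small sketch of translating it to Mb/s; the helper name is invented and the mapping assumes exactly one bit is set.

/* Illustrative only: map an i40e_aq_link_speed bitmap to Mb/s. */
static u32 aq_link_speed_to_mbps_sketch(u8 link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return 100;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_40GB:
		return 40000;
	default:
		return 0;	/* I40E_LINK_SPEED_UNKNOWN */
	}
}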
+
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum bit patterns */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK 0x08
+#define I40E_AQ_PHY_ENABLE_AN 0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
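
A common flow is to read the current settings with Get PHY Abilities (0x0600) and write most of them back through Set PHY Config. A hedged sketch of the field copying only; the function name is made up and the AQ descriptor plumbing is omitted.

/* Illustrative only: seed a Set PHY Config payload from a Get PHY
 * Abilities response, keeping link and autonegotiation enabled.
 */
static void fill_phy_config_sketch(struct i40e_aq_set_phy_config *cfg,
				   struct i40e_aq_get_phy_abilities_resp *abl)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type = abl->phy_type;
	cfg->link_speed = abl->link_speed;
	cfg->eee_capability = abl->eee_capability;
	cfg->eeer = abl->eeer_val;
	cfg->low_power_ctrl = abl->d3_lpan;
	cfg->abilities = abl->abilities | I40E_AQ_PHY_ENABLE_LINK |
			 I40E_AQ_PHY_ENABLE_AN;
}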
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
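
The link_info and an_info bytes of the response are bitmaps; a short sketch of the checks a caller might make (the helper name is invented for illustration):

/* Illustrative only: interpret a Get Link Status response. */
static void report_link_sketch(struct i40e_hw *hw,
			       struct i40e_aqc_get_link_status *resp)
{
	bool link_up = resp->link_info & I40E_AQ_LINK_UP;
	bool tx_pause = resp->an_info & I40E_AQ_LINK_PAUSE_TX;
	bool rx_pause = resp->an_info & I40E_AQ_LINK_PAUSE_RX;

	hw_dbg(hw, "link %s, speed bitmap 0x%02x, pause tx/rx %d/%d\n",
	       link_up ? "up" : "down", resp->link_speed,
	       tx_pause, rx_pause);
}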
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
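
For the indirect NVM Read (0x0701) the structure above occupies the descriptor's command words while addr_high/addr_low describe the data buffer. A hedged sketch of filling the command-specific fields; the helper name is invented, and buffer setup and AQ submission are omitted.

/* Illustrative only: request 'length' bytes at 'offset' from 'module'. */
static void fill_nvm_read_sketch(struct i40e_aqc_nvm_update *cmd, u8 module,
				 u32 offset, u16 length, bool last_command)
{
	memset(cmd, 0, sizeof(*cmd));
	if (last_command)
		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
	cmd->module_pointer = module;
	cmd->offset = cpu_to_le32(offset);
	cmd->length = cpu_to_le16(length);
}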
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+/* Send to PF command (indirect 0x0801); the id field is only used by the PF
+ * Send to VF command (indirect 0x0802); the id field is only used by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* LAN Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add UDP Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure_A0 {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0);
+
+struct i40e_aqc_tunnel_key_structure {
+ u8 key1_off;
+ u8 key2_off;
+ u8 key1_len; /* 0 to 15 */
+ u8 key2_len; /* 0 to 15 */
+ u8 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
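
The Debug Modify Register command asks firmware to read-modify-write the addressed register; the net effect is equivalent to the sketch below (illustrative only, not part of the patch):

/* Illustrative only: the update the firmware applies for command 0xFF07. */
static inline u32 debug_modify_reg_result(u32 old_val, u32 clear_mask,
					  u32 set_mask)
{
	return (old_val & ~clear_mask) | set_mask;
}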
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+#define I40E_AQ_CLUSTER_ID_ALTRAM 11
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
new file mode 100644
index 00000000000..8e6a6dd9212
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
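
These hooks are supplied by the OS-specific part of the driver; a hedged sketch of a typical call sequence for an indirect-command buffer, with an invented function name and error handling trimmed:

/* Illustrative only: allocate and release a 4 KB ARQ buffer. */
static i40e_status arq_buf_sketch(struct i40e_hw *hw,
				  struct i40e_dma_mem *buf)
{
	i40e_status status;

	status = i40e_allocate_dma_mem(hw, buf, i40e_mem_arq_buf,
				       4096, 4096);
	if (status)
		return status;

	/* ... hand buf->pa to hardware, use buf->va from the driver ... */

	return i40e_free_dma_mem(hw, buf);
}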
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
new file mode 100644
index 00000000000..a43155afdbe
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -0,0 +1,624 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the MAC type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_DEV_ID_SFP_XL710:
+ case I40E_DEV_ID_QEMU:
+ case I40E_DEV_ID_KX_A:
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_KX_C:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_DEV_ID_VF:
+ case I40E_DEV_ID_VF_HV:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40evf_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40evf_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the admin send queue is enabled, else false.
+ **/
+bool i40evf_check_asq_alive(struct i40e_hw *hw)
+{
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
+}
+
+/**
+ * i40evf_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+
+/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40evf_ptype_lookup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
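
A hedged sketch of that flow in code, using the lookup table defined below; the helper name is invented for illustration:

/* Illustrative only: the decode flow described above. */
static void show_ptype_sketch(struct i40e_hw *hw, u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = i40evf_ptype_lookup[ptype];

	if (!decoded.known) {
		hw_dbg(hw, "ptype %u: unknown\n", ptype);
		return;
	}

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
		hw_dbg(hw, "ptype %u: IP, inner_prot %u, tunnel %u\n",
		       ptype, decoded.inner_prot, decoded.tunnel_type);
	else
		hw_dbg(hw, "ptype %u: L2, payload layer %u\n",
		       ptype, decoded.payload_layer);
}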
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+ I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ I40E_RX_PTYPE_##OUTER_FRAG, \
+ I40E_RX_PTYPE_TUNNEL_##T, \
+ I40E_RX_PTYPE_TUNNEL_END_##TE, \
+ I40E_RX_PTYPE_##TEF, \
+ I40E_RX_PTYPE_INNER_PROT_##I, \
+ I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+ { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
+ /* L2 Packet types */
+ I40E_PTT_UNUSED_ENTRY(0),
+ I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
+ I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(4),
+ I40E_PTT_UNUSED_ENTRY(5),
+ I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT_UNUSED_ENTRY(8),
+ I40E_PTT_UNUSED_ENTRY(9),
+ I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+ /* Non Tunneled IPv4 */
+ I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(25),
+ I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(32),
+ I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(39),
+ I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(47),
+ I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(54),
+ I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(62),
+ I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(69),
+ I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(77),
+ I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(84),
+ I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT_UNUSED_ENTRY(91),
+ I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(98),
+ I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(105),
+ I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(113),
+ I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(120),
+ I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(128),
+ I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(135),
+ I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(143),
+ I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ I40E_PTT_UNUSED_ENTRY(150),
+ I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ I40E_PTT_UNUSED_ENTRY(154),
+ I40E_PTT_UNUSED_ENTRY(155),
+ I40E_PTT_UNUSED_ENTRY(156),
+ I40E_PTT_UNUSED_ENTRY(157),
+ I40E_PTT_UNUSED_ENTRY(158),
+ I40E_PTT_UNUSED_ENTRY(159),
+
+ I40E_PTT_UNUSED_ENTRY(160),
+ I40E_PTT_UNUSED_ENTRY(161),
+ I40E_PTT_UNUSED_ENTRY(162),
+ I40E_PTT_UNUSED_ENTRY(163),
+ I40E_PTT_UNUSED_ENTRY(164),
+ I40E_PTT_UNUSED_ENTRY(165),
+ I40E_PTT_UNUSED_ENTRY(166),
+ I40E_PTT_UNUSED_ENTRY(167),
+ I40E_PTT_UNUSED_ENTRY(168),
+ I40E_PTT_UNUSED_ENTRY(169),
+
+ I40E_PTT_UNUSED_ENTRY(170),
+ I40E_PTT_UNUSED_ENTRY(171),
+ I40E_PTT_UNUSED_ENTRY(172),
+ I40E_PTT_UNUSED_ENTRY(173),
+ I40E_PTT_UNUSED_ENTRY(174),
+ I40E_PTT_UNUSED_ENTRY(175),
+ I40E_PTT_UNUSED_ENTRY(176),
+ I40E_PTT_UNUSED_ENTRY(177),
+ I40E_PTT_UNUSED_ENTRY(178),
+ I40E_PTT_UNUSED_ENTRY(179),
+
+ I40E_PTT_UNUSED_ENTRY(180),
+ I40E_PTT_UNUSED_ENTRY(181),
+ I40E_PTT_UNUSED_ENTRY(182),
+ I40E_PTT_UNUSED_ENTRY(183),
+ I40E_PTT_UNUSED_ENTRY(184),
+ I40E_PTT_UNUSED_ENTRY(185),
+ I40E_PTT_UNUSED_ENTRY(186),
+ I40E_PTT_UNUSED_ENTRY(187),
+ I40E_PTT_UNUSED_ENTRY(188),
+ I40E_PTT_UNUSED_ENTRY(189),
+
+ I40E_PTT_UNUSED_ENTRY(190),
+ I40E_PTT_UNUSED_ENTRY(191),
+ I40E_PTT_UNUSED_ENTRY(192),
+ I40E_PTT_UNUSED_ENTRY(193),
+ I40E_PTT_UNUSED_ENTRY(194),
+ I40E_PTT_UNUSED_ENTRY(195),
+ I40E_PTT_UNUSED_ENTRY(196),
+ I40E_PTT_UNUSED_ENTRY(197),
+ I40E_PTT_UNUSED_ENTRY(198),
+ I40E_PTT_UNUSED_ENTRY(199),
+
+ I40E_PTT_UNUSED_ENTRY(200),
+ I40E_PTT_UNUSED_ENTRY(201),
+ I40E_PTT_UNUSED_ENTRY(202),
+ I40E_PTT_UNUSED_ENTRY(203),
+ I40E_PTT_UNUSED_ENTRY(204),
+ I40E_PTT_UNUSED_ENTRY(205),
+ I40E_PTT_UNUSED_ENTRY(206),
+ I40E_PTT_UNUSED_ENTRY(207),
+ I40E_PTT_UNUSED_ENTRY(208),
+ I40E_PTT_UNUSED_ENTRY(209),
+
+ I40E_PTT_UNUSED_ENTRY(210),
+ I40E_PTT_UNUSED_ENTRY(211),
+ I40E_PTT_UNUSED_ENTRY(212),
+ I40E_PTT_UNUSED_ENTRY(213),
+ I40E_PTT_UNUSED_ENTRY(214),
+ I40E_PTT_UNUSED_ENTRY(215),
+ I40E_PTT_UNUSED_ENTRY(216),
+ I40E_PTT_UNUSED_ENTRY(217),
+ I40E_PTT_UNUSED_ENTRY(218),
+ I40E_PTT_UNUSED_ENTRY(219),
+
+ I40E_PTT_UNUSED_ENTRY(220),
+ I40E_PTT_UNUSED_ENTRY(221),
+ I40E_PTT_UNUSED_ENTRY(222),
+ I40E_PTT_UNUSED_ENTRY(223),
+ I40E_PTT_UNUSED_ENTRY(224),
+ I40E_PTT_UNUSED_ENTRY(225),
+ I40E_PTT_UNUSED_ENTRY(226),
+ I40E_PTT_UNUSED_ENTRY(227),
+ I40E_PTT_UNUSED_ENTRY(228),
+ I40E_PTT_UNUSED_ENTRY(229),
+
+ I40E_PTT_UNUSED_ENTRY(230),
+ I40E_PTT_UNUSED_ENTRY(231),
+ I40E_PTT_UNUSED_ENTRY(232),
+ I40E_PTT_UNUSED_ENTRY(233),
+ I40E_PTT_UNUSED_ENTRY(234),
+ I40E_PTT_UNUSED_ENTRY(235),
+ I40E_PTT_UNUSED_ENTRY(236),
+ I40E_PTT_UNUSED_ENTRY(237),
+ I40E_PTT_UNUSED_ENTRY(238),
+ I40E_PTT_UNUSED_ENTRY(239),
+
+ I40E_PTT_UNUSED_ENTRY(240),
+ I40E_PTT_UNUSED_ENTRY(241),
+ I40E_PTT_UNUSED_ENTRY(242),
+ I40E_PTT_UNUSED_ENTRY(243),
+ I40E_PTT_UNUSED_ENTRY(244),
+ I40E_PTT_UNUSED_ENTRY(245),
+ I40E_PTT_UNUSED_ENTRY(246),
+ I40E_PTT_UNUSED_ENTRY(247),
+ I40E_PTT_UNUSED_ENTRY(248),
+ I40E_PTT_UNUSED_ENTRY(249),
+
+ I40E_PTT_UNUSED_ENTRY(250),
+ I40E_PTT_UNUSED_ENTRY(251),
+ I40E_PTT_UNUSED_ENTRY(252),
+ I40E_PTT_UNUSED_ENTRY(253),
+ I40E_PTT_UNUSED_ENTRY(254),
+ I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF
+ | I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ if (!cmd_details) {
+ struct i40e_asq_cmd_details details;
+ memset(&details, 0, sizeof(details));
+ details.async = true;
+ cmd_details = &details;
+ }
+ status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+ return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg)
+{
+ struct i40e_virtchnl_vsi_resource *vsi_res;
+ int i;
+
+ vsi_res = &msg->vsi_res[0];
+
+ hw->dev_caps.num_vsis = msg->num_vsis;
+ hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+ hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+ hw->dev_caps.dcb = msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ hw->dev_caps.fcoe = (msg->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+ for (i = 0; i < msg->num_vsis; i++) {
+ if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+ memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+ ETH_ALEN);
+ }
+ vsi_res++;
+ }
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+i40e_status i40e_vf_reset(struct i40e_hw *hw)
+{
+ return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+ 0, NULL, 0, NULL);
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
new file mode 100644
index 00000000000..a2ad9a4e399
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -0,0 +1,236 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw bookkeeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals the PCI function number for the PF; dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
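
A worked illustration of the macro with assumed values: an object type with base 0 and 128-byte objects, creating 512 of them from index 0, spans 64 KB of FPM space and therefore stays within the first 2 MB direct backing page. The function below is only a hand expansion of the macro for those values.

/* Illustrative only: expansion of I40E_FIND_SD_INDEX_LIMIT for the
 * assumed values above (base 0, size 128, index 0, cnt 512).
 */
static void sd_index_limit_sketch(u32 *sd_idx, u32 *sd_limit)
{
	u64 fpm_addr = 0 + (u64)128 * 0;		/* base + size * index = 0 */
	u64 fpm_limit = fpm_addr + (u64)128 * 512;	/* = 65536 */

	*sd_idx = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);		  /* 0 */
	*sd_limit = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE) + 1; /* 1 */
}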
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
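+
+/* Usage sketch (illustrative): given an initialized hmc_info, the two
+ * macros above turn a run of objects into descriptor ranges; both write
+ * through their output pointers.  I40E_HMC_LAN_TX is the resource-type
+ * constant from i40e_lan_hmc.h, txq_count an assumed queue count:
+ *
+ *   u32 sd_idx, sd_limit, pd_idx, pd_limit;
+ *
+ *   I40E_FIND_SD_INDEX_LIMIT(hmc_info, I40E_HMC_LAN_TX, 0, txq_count,
+ *                            &sd_idx, &sd_limit);
+ *   I40E_FIND_PD_INDEX_LIMIT(hmc_info, I40E_HMC_LAN_TX, 0, txq_count,
+ *                            &pd_idx, &pd_limit);
+ */
+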
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
new file mode 100644
index 00000000000..d6f76224153
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
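+
+/* Usage sketch (illustrative): dbuff/hbuff carry buffer sizes in the
+ * units implied by the shifts above (128- and 64-byte granules); rx_ctx
+ * is an assumed local of type struct i40e_hmc_obj_rxq:
+ *
+ *   rx_ctx.dbuff = 2048 >> I40E_RXQ_CTX_DBUFF_SHIFT;  // 16 x 128B
+ *   rx_ctx.hbuff = 256 >> I40E_RXQ_CTX_HBUFF_SHIFT;   // 4 x 64B
+ */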
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
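+
+/* Usage sketch (illustrative): PF bring-up pairs these calls roughly as
+ *
+ *   i40e_init_lan_hmc(hw, num_txq, num_rxq, 0, 0);
+ *   i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_PREFERRED);
+ *   ...
+ *   i40e_shutdown_lan_hmc(hw);
+ *
+ * with the FCoE context/filter counts left at zero when FCoE is unused.
+ */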
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
new file mode 100644
index 00000000000..21a91b14bf8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -0,0 +1,75 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+
+/* get readq/writeq support for 32-bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file provides the glue between the shared code and
+ * the OS-specific primitives
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_VFGEN_RSTAT)
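+
+/* Usage sketch (illustrative): MMIO writes are posted, so a write that
+ * must reach the device before execution continues is followed by a
+ * flushing read; reg and val are placeholders:
+ *
+ *   wr32(hw, reg, val);
+ *   i40e_flush(hw);   // the readl() forces the posted write out
+ */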
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40evf_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
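+
+/* Usage sketch (illustrative): shared code allocates rings through the
+ * wrappers above; the memory-type argument (i40e_mem_arq_ring here,
+ * from i40e_alloc.h) is the one dropped as unused on Linux:
+ *
+ *   struct i40e_dma_mem ring;
+ *
+ *   status = i40e_allocate_dma_mem(hw, &ring, i40e_mem_arq_ring,
+ *                                  size, alignment);
+ *   ...
+ *   i40e_free_dma_mem(hw, &ring);
+ */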
+
+#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+ __attribute__ ((format(gnu_printf, 3, 4)));
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
new file mode 100644
index 00000000000..849edcc2e39
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -0,0 +1,94 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures.  These exist
+ * mostly because they are needed even before init has
+ * happened and assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40evf_init_adminq(struct i40e_hw *hw);
+i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40evf_asq_done(struct i40e_hw *hw);
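+
+/* Usage sketch (illustrative): a direct command descriptor is filled
+ * (here via i40evf_fill_default_direct_cmd_desc(), assumed from this
+ * same shared-code set) and handed to the send queue:
+ *
+ *   struct i40e_aq_desc desc;
+ *
+ *   i40evf_fill_default_direct_cmd_desc(&desc, opcode);
+ *   status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */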
+
+/* debug function for adminq */
+void i40evf_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40evf_resume_aq(struct i40e_hw *hw);
+bool i40evf_check_asq_alive(struct i40e_hw *hw);
+i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
+extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+ return i40evf_ptype_lookup[ptype];
+}
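+
+/* Usage sketch (illustrative): the Rx clean-up path decodes the ptype
+ * from the descriptor before trusting checksum/hash hints; field names
+ * follow struct i40e_rx_ptype_decoded in i40e_type.h:
+ *
+ *   struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+ *
+ *   if (!(decoded.known && decoded.outer_ip))
+ *           return;   // unknown or non-IP: skip csum offload
+ */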
+
+/* prototypes for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
new file mode 100644
index 00000000000..36983965581
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -0,0 +1,4674 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
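+
+/* Usage sketch (illustrative): every field in this file follows the
+ * same _SHIFT/_MASK idiom, e.g. sizing and enabling the receive queue:
+ *
+ *   u32 reg = ((num_entries << I40E_PF_ARQLEN_ARQLEN_SHIFT) &
+ *              I40E_PF_ARQLEN_ARQLEN_MASK) |
+ *             I40E_PF_ARQLEN_ARQENABLE_MASK;
+ *   wr32(hw, I40E_PF_ARQLEN, reg);
+ */
+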
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4)) /* _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16)) /* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4)) /* _pf=0..15 */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
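/* A minimal sketch of how the PFHMC_SD* fields above combine when a host
 * memory cache segment descriptor is programmed: the backing-page address
 * and validity bits go into SDDATAHIGH/SDDATALOW, and a write to SDCMD
 * with PMSDWR set commits the entry at the chosen index.  wr32() is
 * assumed to be the driver's register-write helper; hw, pa (page aligned),
 * pd_count and sd_index are placeholders, and treating PMSDTYPE = 1 as a
 * direct segment descriptor is an assumption here.
 */
	u32 low = lower_32_bits(pa) |
		  (pd_count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
		  I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK |
		  I40E_PFHMC_SDDATALOW_PMSDVALID_MASK;

	wr32(hw, I40E_PFHMC_SDDATAHIGH, upper_32_bits(pa));
	wr32(hw, I40E_PFHMC_SDDATALOW, low);
	wr32(hw, I40E_PFHMC_SDCMD, I40E_PFHMC_SDCMD_PMSDWR_MASK | sd_index);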
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
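/* A minimal sketch of re-arming an MSI-X vector with the DYN_CTLN fields
 * above: INTENA re-enables the cause, CLEARPBA clears the pending bit and
 * ITR_INDX selects the throttling timer.  wr32() and i40e_flush() are
 * assumed driver helpers; hw and vector are placeholders, and using 0x3
 * as the "no ITR update" index is an assumption.
 */
	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
		  I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
		  (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTLN(vector), val);
	i40e_flush(hw);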
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* _i=0...11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
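These SHIFT/MASK pairs are consumed with plain read-modify-write sequences on the memory-mapped CSRs. As a minimal sketch (illustrative only, not part of this header or patch), the snippet below programs a queue index into GLLAN_TXPRE_QDIS and requests a transmit-queue pre-disable; it assumes the rd32()/wr32() MMIO wrappers from the driver's osdep layer and a valid struct i40e_hw, and omits the sequencing the driver itself performs around queue disable.

/* Illustrative sketch only -- not part of i40e_register.h or this patch.
 * Assumes rd32()/wr32() MMIO helpers and struct i40e_hw.  Each
 * GLLAN_TXPRE_QDIS register covers 128 queues, so the register index is
 * the absolute queue number divided by 128.
 */
static void example_tx_pre_qdis(struct i40e_hw *hw, u32 abs_queue, bool disable)
{
	u32 reg = rd32(hw, I40E_GLLAN_TXPRE_QDIS(abs_queue / 128));

	/* select the queue in the QINDX field */
	reg &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg |= (abs_queue << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) &
	       I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;

	/* request either the set-disable or clear-disable action */
	reg |= disable ? I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK :
			 I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(abs_queue / 128), reg);
}
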
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QTABLE_MAX_INDEX 7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
+#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+
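The GLNVM_SRCTL/GLNVM_SRDATA pair above is the Shadow RAM register interface: software writes the word offset and a START request into SRCTL, waits for DONE, then pulls the 16-bit result from the RDDATA half of SRDATA. The sketch below shows how those shifts and masks combine, assuming the rd32()/wr32() helpers and a simple udelay() poll loop; the driver's own NVM code wraps the same sequence in timeouts and the NVM ownership semaphore, which are omitted here.

/* Illustrative sketch only -- not part of this patch.  Reads one 16-bit
 * word from the NVM Shadow RAM through GLNVM_SRCTL/GLNVM_SRDATA, assuming
 * rd32()/wr32() MMIO helpers; error handling and the NVM ownership
 * semaphore taken by the real driver are omitted.
 */
static int example_read_sr_word(struct i40e_hw *hw, u16 offset, u16 *data)
{
	u32 srctl;
	int i;

	/* wait for any previous Shadow RAM access to finish (DONE set) */
	for (i = 0; i < 100000; i++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK)
			break;
		udelay(5);
	}
	if (!(srctl & I40E_GLNVM_SRCTL_DONE_MASK))
		return -EIO;

	/* program the word offset and kick off the read */
	srctl = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
		(1u << I40E_GLNVM_SRCTL_START_SHIFT);
	wr32(hw, I40E_GLNVM_SRCTL, srctl);

	/* wait for completion, then extract the data from the read half */
	for (i = 0; i < 100000; i++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			u32 srdata = rd32(hw, I40E_GLNVM_SRDATA);

			*data = (u16)((srdata & I40E_GLNVM_SRDATA_RDDATA_MASK) >>
				      I40E_GLNVM_SRDATA_RDDATA_SHIFT);
			return 0;
		}
		udelay(5);
	}
	return -EIO;
}
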
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PCITEST2 0x000BE4BC
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
+#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
+#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
+
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
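Multi-field registers such as WQEALLOC work the same way in the write direction: each field is shifted into position and clipped by its mask before the composed value is written. A hedged sketch under the same assumptions as above (pe_base is an assumed mapped BAR0 pointer; this is not the driver's actual doorbell path):

/* Compose the PF WQE-allocate doorbell from its two fields. */
static void i40e_wqealloc_example(void __iomem *pe_base, u32 qpid, u32 desc_idx)
{
	u32 val = ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
		   I40E_PFPE_WQEALLOC_PEQPID_MASK) |
		  ((desc_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
		   I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK);

	writel(val, pe_base + I40E_PFPE_WQEALLOC);
}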
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
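Every VF-scoped register in the block above takes a _VF argument and expands to base + 4 * vf_id, bounded by the matching _MAX_INDEX define. A short sketch of addressing the per-VF variant of the same doorbell (bar0 is again an assumed mapped base pointer):

/* Per-VF register addressing: the macro argument selects the VF slot.
 * vf_id must not exceed I40E_VFPE_WQEALLOC_MAX_INDEX (127).
 */
static void i40e_vf_wqealloc_example(void __iomem *bar0, u16 vf_id, u32 val)
{
	if (vf_id > I40E_VFPE_WQEALLOC_MAX_INDEX)
		return;
	/* expands to 0x00138000 + vf_id * 4 */
	writel(val, bar0 + I40E_VFPE_WQEALLOC(vf_id));
}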
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
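These GLPES statistics are split into LO/HI register pairs: the LO register carries the lower 32 bits of the counter and the HI register the upper bits (note the 0xFFFF mask, giving a 48-bit counter). A hedged sketch of stitching one such counter back together for a given PF index; the read ordering and helper name are assumptions, not documented behaviour:

/* Read the 48-bit IPv4 Rx packet counter for PF index 'idx' (0..15). */
static u64 i40e_ip4_rx_pkts_example(void __iomem *bar0, unsigned int idx)
{
	u64 lo = readl(bar0 + I40E_GLPES_PFIP4RXPKTSLO(idx));
	u64 hi = readl(bar0 + I40E_GLPES_PFIP4RXPKTSHI(idx)) &
		 I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK;

	return (hi << 32) | lo;
}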
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
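The EEE status bits above are plain read-only flags, so checking them is just a masked test. A minimal, hedged sketch (the base pointer and function name are illustrative; this is not driver code):

/* Report whether EEE was negotiated and whether either direction is in LPI. */
static void i40e_eee_stat_example(void __iomem *bar0)
{
	u32 stat = readl(bar0 + I40E_PRTPM_EEE_STAT);

	pr_info("EEE negotiated: %d, Rx LPI: %d, Tx LPI: %d\n",
		!!(stat & I40E_PRTPM_EEE_STAT_EEE_NEG_MASK),
		!!(stat & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK),
		!!(stat & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK));
}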
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT 7
+#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_TCREGION_MAX_INDEX 3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
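+/* Illustrative sketch of how the split timesync values are used, assuming
+ * the driver's rd32() register accessor: the 64-bit quantities are exposed
+ * as _L/_H pairs, so the PHC time could be assembled as
+ *   u32 lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ *   u32 hi = rd32(hw, I40E_PRTTSYN_TIME_H);
+ *   u64 ns = ((u64)hi << 32) | lo;
+ */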
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
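+/* Illustrative decode of a malicious-driver-detection event, assuming the
+ * driver's rd32() accessor:
+ *   u32 reg = rd32(hw, I40E_GL_MDET_TX);
+ *   if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ *           u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK) >>
+ *                     I40E_GL_MDET_TX_FUNCTION_SHIFT;
+ *           u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ *                       I40E_GL_MDET_TX_QUEUE_SHIFT;
+ *   }
+ */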
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
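+/* Illustrative sketch, assuming the driver's rd32() accessor: PRTPM_SAL/SAH
+ * pair up as a station (MAC) address entry, with the low 32 bits in SAL and
+ * the remaining 16 bits in the SAH PFPM_SAH field:
+ *   u32 sal = rd32(hw, I40E_PRTPM_SAL(0));
+ *   u16 sah = rd32(hw, I40E_PRTPM_SAH(0)) & I40E_PRTPM_SAH_PFPM_SAH_MASK;
+ */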
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
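+/* Illustrative sketch of programming the VF admin receive queue, assuming
+ * the driver's wr32() accessor and a ring of 'count' descriptors at DMA
+ * address 'dma' (names here are only for illustration):
+ *   wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(dma));
+ *   wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(dma));
+ *   wr32(hw, I40E_VF_ARQLEN1, count | I40E_VF_ARQLEN1_ARQENABLE_MASK);
+ */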
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
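+/* Illustrative sketch, assuming the driver's wr32() accessor: re-arming a
+ * VF vector's interrupt typically sets INTENA and CLEARPBA in the matching
+ * dynamic control register, e.g.
+ *   wr32(hw, I40E_VFINT_DYN_CTLN1(vector),
+ *        I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ *        I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
+ */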
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
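+/* Illustrative sketch: the VF RSS hash key and lookup table are written one
+ * 32-bit word at a time through these registers, e.g. assuming wr32() and
+ * hypothetical u32 arrays hkey[] and hlut[] holding the key/LUT contents:
+ *   for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ *           wr32(hw, I40E_VFQF_HKEY(i), hkey[i]);
+ *   for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ *           wr32(hw, I40E_VFQF_HLUT(i), hlut[i]);
+ */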
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
+#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
new file mode 100644
index 00000000000..7fa7a41915c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
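+
+/* These codes are returned in place of plain errnos by the shared and admin
+ * queue code; callers typically compare against I40E_SUCCESS (0), e.g.
+ * (illustrative) "if (status != I40E_SUCCESS) return -EIO;" where status
+ * holds an enum i40e_status_code value.
+ */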
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
new file mode 100644
index 00000000000..48ebb6cd69f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -0,0 +1,1667 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/prefetch.h>
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
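+/* I40E_TXD_CMD is the command set written into the last descriptor of a
+ * frame: EOP marks end-of-packet and RS requests a descriptor write-back
+ * report; e.g. (illustrative) the final descriptor of a packet would be
+ * built with build_ctob(td_cmd | I40E_TXD_CMD, td_offset, size, td_tag).
+ */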
+
+/**
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_single(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
+}
+
+/**
+ * i40evf_clean_tx_ring - Free any pending Tx buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++)
+ i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (!tx_ring->netdev)
+ return;
+
+ /* cleanup Tx queue statistics */
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index));
+}
+
+/**
+ * i40evf_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40evf_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - number of Tx descriptors not yet processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
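+
+/* Worked example (illustrative numbers): with a 512-entry ring,
+ * next_to_clean = 500 and next_to_use = 10 after wrap-around, the pending
+ * count is (10 + 512) - 500 = 22 descriptors.
+ */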
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
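+
+/* The write-back location is the extra u32 that i40evf_setup_tx_descriptors()
+ * reserves immediately past the last descriptor (tx_ring->size +=
+ * sizeof(u32)); the hardware updates it with the ring head as descriptors
+ * complete.
+ */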
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many packet completions we are allowed to process
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_head;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buf->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+ /* simple throttlerate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+	switch (new_latency_range) {
+	case I40E_LOWEST_LATENCY:
+		if (bytes_per_int > 10)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	case I40E_LOW_LATENCY:
+		if (bytes_per_int > 20)
+			new_latency_range = I40E_BULK_LATENCY;
+		else if (bytes_per_int <= 10)
+			new_latency_range = I40E_LOWEST_LATENCY;
+		break;
+	case I40E_BULK_LATENCY:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	}
+	rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
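+
+/* Worked example of the smoothing step (purely illustrative numbers): with
+ * rc->itr = 62 and new_itr = 25, the blended value before the final mask is
+ * (10 * 25 * 62) / ((9 * 25) + 62) = 15500 / 287 = 54, so the interval moves
+ * only part of the way toward the new target on each update.
+ */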
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+}
+
+/**
+ * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+	/* add u32 for head writeback; the 4K alignment below also
+	 * guarantees this is at least one cache line in size
+	 */
+ tx_ring->size += sizeof(u32);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
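+	/* e.g. (illustrative) a 512-descriptor ring: 512 * 16 bytes plus
+	 * 4 bytes for head writeback = 8196, rounded up to 12288 bytes
+	 */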
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40evf_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40evf_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40evf_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Update next_to_use and bump the hardware tail
+ * @rx_ring: ring to bump
+ * @val: new next_to_use value to write to the tail register
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ * @rx_ptype: ptype value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error,
+ u16 rx_ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
+ bool ipv4_tunnel, ipv6_tunnel;
+ __wsum rx_udp_csum;
+ struct iphdr *iph;
+ __sum16 csum;
+
+ ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+
+ skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
+ /* likely incorrect csum if alternate IP extension headers found */
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
+ return;
+
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+ return;
+
+	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+	 * it in the driver; the hardware does not do it for us.
+	 * Since the L3L4P bit was set we assume a valid IHL value (>= 5),
+	 * so the total length of the IPv4 header is IHL * 4 bytes.
+	 * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
+ if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ skb->transport_header = skb->mac_header +
+ sizeof(struct ethhdr) +
+ (ip_hdr(skb)->ihl * 4);
+
+ /* Add 4 bytes for VLAN tagged packets */
+ skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ ? VLAN_HLEN : 0;
+
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
+
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ const __le64 rss_mask =
+ cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+ if ((ring->netdev->features & NETIF_F_RXHASH) &&
+ (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ else
+ return 0;
+}
+
+/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return PKT_HASH_TYPE_NONE;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+ return PKT_HASH_TYPE_L4;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+ return PKT_HASH_TYPE_L3;
+ else
+ return PKT_HASH_TYPE_L2;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u8 rx_ptype;
+ u64 qword;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+ * If this is an skb from previous receive dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
+ goto next_desc;
+ }
+
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40evf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ struct i40e_ring *ring;
+ bool clean_complete = true;
+ int budget_per_ring;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ i40e_for_each_ring(ring, q_vector->tx)
+ clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+
+ i40e_for_each_ring(ring, q_vector->rx)
+ clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state))
+ i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
+
+ return 0;
+}
+
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the corresponding generic transmit flags
+ * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code to indicate the frame should be dropped upon error,
+ * otherwise returns 0 to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ if (protocol == htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((u64)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: ptr to the ring to send on
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ }
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+ int i = tx_ring->next_to_use;
+
+ if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+ !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct skb_frag_struct *frag;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u16 i = tx_ring->next_to_use;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+ first->gso_segs = gso_segs;
+ first->skb = skb;
+ first->tx_flags = tx_flags;
+
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ tx_bi = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ dma += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_bi = &tx_ring->tx_bi[i];
+ }
+
+ /* Place RS bit on last descriptor of any packet that spans across the
+ * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
+ */
+#define WB_STRIDE 0x3
+ if (((i & WB_STRIDE) != WB_STRIDE) &&
+ (first <= &tx_ring->tx_bi[i]) &&
+ (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ } else {
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset, size, td_tag) |
+ cpu_to_le64((u64)I40E_TXD_CMD <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index),
+ first->bytecount);
+
+ /* set the timestamp */
+ first->time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ writel(i, tx_ring->tail);
+
+ return;
+
+dma_error:
+ dev_info(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring since we
+ * need at least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ unsigned int f;
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+ int tso;
+ if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* always enable CRC insertion offload */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ }
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
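
For reference, the descriptor accounting done by i40e_xmit_descriptor_count() above can be followed with a small standalone sketch. It only mirrors the TXD_USE_COUNT()/I40E_MAX_DATA_PER_TXD arithmetic defined in i40e_txrx.h below; the frame layout (a 9000-byte linear area plus two 4 KiB fragments) is a made-up example, not anything taken from the driver.

#include <stdio.h>

#define I40E_MAX_DATA_PER_TXD 8192
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

int main(void)
{
	/* hypothetical skb layout: 9000-byte linear area plus two 4 KiB frags */
	unsigned int headlen = 9000;
	unsigned int frag_len[] = { 4096, 4096 };
	unsigned int count = 0, f;

	for (f = 0; f < 2; f++)
		count += TXD_USE_COUNT(frag_len[f]);	/* 1 + 1 */
	count += TXD_USE_COUNT(headlen);		/* ceil(9000/8192) = 2 */

	/* the driver then requires count + 4 (gap) + 1 (context descriptor)
	 * unused descriptors before it will queue the frame
	 */
	printf("data descs = %u, free descs required = %u\n", count, count + 4 + 1);
	return 0;
}

With these example lengths the sketch prints 4 data descriptors and a requirement of 9 free slots, matching the count + 4 + 1 check passed to i40e_maybe_stop_tx() above.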
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
new file mode 100644
index 00000000000..30d248bc5d1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -0,0 +1,287 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TXRX_H_
+#define _I40E_TXRX_H_
+
+/* Interrupt Throttling and Rate Limiting Goodies */
+
+#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
+#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+/* This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere in the manual
+ * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum i40e_dyn_idx_t {
+ I40E_IDX_ITR0 = 0,
+ I40E_IDX_ITR1 = 1,
+ I40E_IDX_ITR2 = 2,
+ I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* these are indexes into ITRN registers */
+#define I40E_RX_ITR I40E_IDX_ITR0
+#define I40E_TX_ITR I40E_IDX_ITR1
+#define I40E_PE_ITR I40E_IDX_ITR2
+
+/* Supported RSS offloads */
+#define I40E_DEFAULT_RSS_HENA ( \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 8192
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct i40e_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_page_failed;
+ u64 alloc_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ struct i40e_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ struct i40e_queue_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+ struct rcu_head rcu; /* to avoid race on free */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+ /* array of pointers to rings */
+ struct i40e_ring *ring;
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
+void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
+int i40evf_napi_poll(struct napi_struct *napi, int budget);
+#endif /* _I40E_TXRX_H_ */
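
The singly linked ring list walked by the i40e_for_each_ring() iterator above is the same structure i40evf_napi_poll() relies on when it cleans every Tx and Rx ring attached to a q_vector. The sketch below is a minimal stand-alone illustration of that traversal; the struct definitions are simplified stand-ins, not the driver's i40e_ring/i40e_ring_container types.

#include <stdio.h>
#include <stddef.h>

/* simplified stand-ins for struct i40e_ring / struct i40e_ring_container */
struct ring {
	struct ring *next;	/* next ring owned by the same q_vector */
	int queue_index;
};

struct ring_container {
	struct ring *ring;	/* head of the singly linked ring list */
};

/* same shape as the i40e_for_each_ring() iterator above */
#define for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

int main(void)
{
	struct ring r2 = { NULL, 2 }, r1 = { &r2, 1 }, r0 = { &r1, 0 };
	struct ring_container rx = { &r0 };
	struct ring *pos;

	/* mirrors how the poll routine walks q_vector->rx (and q_vector->tx) */
	for_each_ring(pos, rx)
		printf("clean ring %d\n", pos->queue_index);
	return 0;
}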
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
new file mode 100644
index 00000000000..d3cf5a69de5
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -0,0 +1,1165 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710 0x1572
+#define I40E_DEV_ID_QEMU 0x1574
+#define I40E_DEV_ID_KX_A 0x157F
+#define I40E_DEV_ID_KX_B 0x1580
+#define I40E_DEV_ID_KX_C 0x1581
+#define I40E_DEV_ID_QSFP_A 0x1583
+#define I40E_DEV_ID_QSFP_B 0x1584
+#define I40E_DEV_ID_QSFP_C 0x1585
+#define I40E_DEV_ID_VF 0x154C
+#define I40E_DEV_ID_VF_HV 0x1571
+
+#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
+ (d) == I40E_DEV_ID_QSFP_B || \
+ (d) == I40E_DEV_ID_QSFP_C)
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_VM_QUEUE 0x1
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_DA,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 loopback;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+ u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ /* Flow director filter id in case of
+ * Programming status desc WB
+ */
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
+ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
+ I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
+ I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK (0x1UL << \
+ I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
+ I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
+ I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
+ I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
+ I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0x7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Values 0-30 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-40 are reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ I40E_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_WAKE_ON_LAN 0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
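+
+/* Illustrative sketch only (the driver's real routine also skips certain
+ * modules such as VPD and PCIe ALT data): validating the software checksum
+ * amounts to summing every Shadow RAM word, including the stored checksum
+ * word itself, in 16-bit arithmetic and comparing against the base value:
+ *
+ *	u16 sum = 0;
+ *	for (i = 0; i < sr_size_in_words; i++)
+ *		sum += sr_word[i];
+ *	checksum_ok = (sum == I40E_SR_SW_CHECKSUM_BASE);
+ */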
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
new file mode 100644
index 00000000000..cd18d568900
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -0,0 +1,363 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the vsi indexes are relative to the VF. Each VF can have maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in the i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
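+
+/* Simplified sketch of that ordering, using the helpers the VF driver
+ * declares in i40evf.h (illustrative only; the real driver sequences these
+ * from a state machine and waits on admin queue replies between steps):
+ *
+ *	i40evf_send_api_ver(adapter);		I40E_VIRTCHNL_OP_VERSION
+ *	i40evf_verify_api_ver(adapter);		check the PF's reply
+ *	i40evf_request_reset(adapter);		I40E_VIRTCHNL_OP_RESET_VF
+ *	i40evf_send_vf_config_msg(adapter);	I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ *	i40evf_get_vf_config(adapter);		parse the resource reply
+ *	i40evf_configure_queues(adapter);	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ *	i40evf_map_queues(adapter);		I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ *	i40evf_enable_queues(adapter);		I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ */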
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* VF sends requests to the PF using the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* PF sends status change events to VFs using
+ * the following op.
+ */
+ I40E_VIRTCHNL_OP_EVENT,
+};
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures.*/
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[ETH_ALEN];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
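+
+/* Illustrative sketch only: because vsi_res[] is a variable-length trailing
+ * array, the buffer used for the GET_VF_RESOURCES reply can be sized as
+ *
+ *	len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *	      (num_vsis - 1) * sizeof(struct i40e_virtchnl_vsi_resource);
+ *
+ * since one VSI resource entry is already counted in the base structure.
+ */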
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
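+
+/* Illustrative sketch only: to act on the first num_queue_pairs queue pairs
+ * of a VSI, the VF fills both bitmaps with one bit per queue, e.g.
+ *
+ *	struct i40e_virtchnl_queue_select vqs = {
+ *		.vsi_id	   = vsi_id,
+ *		.rx_queues = (1 << num_queue_pairs) - 1,
+ *		.tx_queues = (1 << num_queue_pairs) - 1,
+ *	};
+ *
+ * and sends it with I40E_VIRTCHNL_OP_ENABLE_QUEUES or
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES.
+ */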
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[ETH_ALEN];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3.
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
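+
+/* Illustrative sketch only (loop bound and sleep interval are placeholders):
+ * a VF waiting for reset completion masks the register down to the state
+ * field before comparing, so a 0xDEADBEEF read while the hardware is still
+ * in reset decodes as I40E_VFR_UNKNOWN:
+ *
+ *	for (count = 0; count < reset_wait_count; count++) {
+ *		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+ *			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *		if (rstat == I40E_VFR_VFACTIVE || rstat == I40E_VFR_COMPLETED)
+ *			break;
+ *		msleep(10);
+ *	}
+ */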
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
new file mode 100644
index 00000000000..30ef519d4b9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -0,0 +1,298 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40EVF_H_
+#define _I40EVF_H_
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/udp.h>
+
+#include "i40e_type.h"
+#include "i40e_virtchnl.h"
+#include "i40e_txrx.h"
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+#define PFX "i40evf: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args)))
+
+/* dummy struct to make common code less painful */
+struct i40e_vsi {
+ struct i40evf_adapter *back;
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u16 seid;
+ u16 id;
+ unsigned long state;
+ int base_vector;
+ u16 work_limit;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define I40EVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40EVF_DEFAULT_TXD 512
+#define I40EVF_DEFAULT_RXD 512
+#define I40EVF_MAX_TXD 4096
+#define I40EVF_MIN_TXD 64
+#define I40EVF_MAX_RXD 4096
+#define I40EVF_MIN_RXD 64
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
+
+/* Supported Rx Buffer Sizes */
+#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
+#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
+#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
+#define I40EVF_RXBUFFER_2048 2048
+#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define I40EVF_MAX_AQ_BUF_SIZE 4096
+#define I40EVF_AQ_LEN 32
+#define I40EVF_AQ_MAX_ERR 10 /* times to try before resetting AQ */
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define I40E_RX_DESC(R, i) (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+#define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define MAX_RX_QUEUES 8
+#define MAX_TX_QUEUES MAX_RX_QUEUES
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct i40e_q_vector {
+ struct i40evf_adapter *adapter;
+ struct i40e_vsi *vsi;
+ struct napi_struct napi;
+ unsigned long reg_idx;
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+ u32 ring_mask;
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+ int v_idx; /* vector index in list */
+ char name[IFNAMSIZ + 9];
+ cpumask_var_t affinity_mask;
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the i40e hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
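+
+/* Worked example (illustrative only): a requested rate of 20000 ints/sec
+ * maps to EITR_INTS_PER_SEC_TO_REG(20000) = 1000000000 / (20000 * 256) = 195,
+ * while a request of 0 falls back to the hardware minimum of 8.
+ */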
+
+#define I40EVF_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
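+
+/* Worked example (illustrative only): with count = 512, next_to_use = 500
+ * and next_to_clean = 10, next_to_clean is not greater than next_to_use, so
+ * the ring wraps and I40EVF_DESC_UNUSED() = 512 + 10 - 500 - 1 = 21 free
+ * descriptors.
+ */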
+
+#define I40EVF_RX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_rx_desc *)((R).desc))[i]))
+#define I40EVF_TX_DESC_ADV(R, i) \
+ (&(((union i40e_adv_tx_desc *)((R).desc))[i]))
+#define I40EVF_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct i40e_adv_tx_context_desc *)((R).desc))[i]))
+
+#define OTHER_VECTOR 1
+#define NONQ_VECS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 4
+#define MAX_MSIX_COUNT 5
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)
+
+#define I40EVF_QUEUE_END_OF_LIST 0x7FF
+#define I40EVF_FREE_VECTOR 0x7FFF
+struct i40evf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+struct i40evf_vlan_filter {
+ struct list_head list;
+ u16 vlan;
+ bool remove; /* filter needs to be removed */
+ bool add; /* filter needs to be added */
+};
+
+/* Driver state. The order of these is important! */
+enum i40evf_state_t {
+ __I40EVF_STARTUP, /* driver loaded, probe complete */
+ __I40EVF_REMOVE, /* driver is being unloaded */
+ __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __I40EVF_INIT_SW, /* got resources, setting up structs */
+ __I40EVF_RESETTING, /* in reset */
+ /* Below here, watchdog is running */
+ __I40EVF_DOWN, /* ready, can be opened */
+ __I40EVF_TESTING, /* in ethtool self-test */
+ __I40EVF_RUNNING, /* opened, working */
+};
+
+enum i40evf_critical_section_t {
+ __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
+};
+/* make common code happy */
+#define __I40E_DOWN __I40EVF_DOWN
+
+/* board specific private data structure */
+struct i40evf_adapter {
+ struct timer_list watchdog_timer;
+ struct work_struct reset_task;
+ struct work_struct adminq_task;
+ struct delayed_work init_task;
+ struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct list_head vlan_filter_list;
+ char misc_vector_name[IFNAMSIZ + 9];
+
+ /* TX */
+ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+ u32 tx_timeout_count;
+ struct list_head mac_filter_list;
+ u32 tx_desc_count;
+
+ /* RX */
+ struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+ u64 hw_csum_rx_error;
+ u32 rx_desc_count;
+ int num_msix_vectors;
+ struct msix_entry *msix_entries;
+
+ u32 flags;
+#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define I40EVF_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define I40EVF_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
+#define I40EVF_FLAG_RESET_NEEDED (u32)(1 << 10)
+/* duplicates for common code */
+#define I40E_FLAG_FDIR_ATR_ENABLED 0
+#define I40E_FLAG_DCB_ENABLED 0
+#define I40E_FLAG_IN_NETPOLL I40EVF_FLAG_IN_NETPOLL
+#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
+ /* flags for admin queue service task */
+ u32 aq_required;
+ u32 aq_pending;
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ struct i40e_hw hw; /* defined in i40e_type.h */
+
+ enum i40evf_state_t state;
+ volatile unsigned long crit_section;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool link_up;
+ enum i40e_virtchnl_ops current_op;
+ struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
+ struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+ u16 msg_enable;
+ struct i40e_eth_stats current_stats;
+ struct i40e_vsi vsi;
+ u32 aq_wait_count;
+};
+
+
+/* needed by i40evf_ethtool.c */
+extern char i40evf_driver_name[];
+extern const char i40evf_driver_version[];
+
+int i40evf_up(struct i40evf_adapter *adapter);
+void i40evf_down(struct i40evf_adapter *adapter);
+void i40evf_reinit_locked(struct i40evf_adapter *adapter);
+void i40evf_reset(struct i40evf_adapter *adapter);
+void i40evf_set_ethtool_ops(struct net_device *netdev);
+void i40evf_update_stats(struct i40evf_adapter *adapter);
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter);
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter);
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask);
+
+void i40e_napi_add_all(struct i40evf_adapter *adapter);
+void i40e_napi_del_all(struct i40evf_adapter *adapter);
+
+int i40evf_send_api_ver(struct i40evf_adapter *adapter);
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter);
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter);
+int i40evf_get_vf_config(struct i40evf_adapter *adapter);
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush);
+void i40evf_configure_queues(struct i40evf_adapter *adapter);
+void i40evf_deconfigure_queues(struct i40evf_adapter *adapter);
+void i40evf_enable_queues(struct i40evf_adapter *adapter);
+void i40evf_disable_queues(struct i40evf_adapter *adapter);
+void i40evf_map_queues(struct i40evf_adapter *adapter);
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter);
+void i40evf_add_vlans(struct i40evf_adapter *adapter);
+void i40evf_del_vlans(struct i40evf_adapter *adapter);
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
+void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg, u16 msglen);
+#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
new file mode 100644
index 00000000000..60407a9df0c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -0,0 +1,704 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40evf */
+#include "i40evf.h"
+
+#include <linux/uaccess.h>
+
+
+struct i40evf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define I40EVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .stat_offset = offsetof(struct i40evf_adapter, _stat) \
+}
+
+/* All stats are u64, so we don't need to track the size of the field. */
+static const struct i40evf_stats i40evf_gstrings_stats[] = {
+ I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
+ I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
+ I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
+ I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
+ I40EVF_STAT("rx_discards", current_stats.rx_discards),
+ I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
+ I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
+ I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
+ I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
+ I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
+ I40EVF_STAT("tx_discards", current_stats.tx_discards),
+ I40EVF_STAT("tx_errors", current_stats.tx_errors),
+};
+
+#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
+ (((struct i40evf_adapter *) \
+ netdev_priv(_dev))->vsi_res->num_queue_pairs \
+ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
+
+/**
+ * i40evf_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings. Because this is a VF, we don't know what
+ * kind of link we really have, so we fake it.
+ **/
+static int i40evf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ /* In the future the VF will be able to query the PF for
+ * some information - for now use a dummy value
+ */
+ ecmd->supported = 0;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = PORT_NONE;
+
+ return 0;
+}
+
+/**
+ * i40evf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of string table. This driver only supports
+ * strings for statistics.
+ **/
+static int i40evf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return I40EVF_STATS_LEN(netdev);
+ else
+ return -EINVAL;
+}
+
+/**
+ * i40evf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ **/
+static void i40evf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int i, j;
+ char *p;
+
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
+ data[i] = *(u64 *)p;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->tx_rings[j]->stats.packets;
+ data[i++] = adapter->tx_rings[j]->stats.bytes;
+ }
+ for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) {
+ data[i++] = adapter->rx_rings[j]->stats.packets;
+ data[i++] = adapter->rx_rings[j]->stats.bytes;
+ }
+}
+
+/**
+ * i40evf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds stats string table.
+ **/
+static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ if (sset == ETH_SS_STATS) {
+ for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, i40evf_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+/**
+ * i40evf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ **/
+static u32 i40evf_get_msglevel(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+/**
+ * i40evf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ **/
+static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+/**
+ * i40evf_get_drvinfo - Get driver info
+ * @netdev: network interface device structure
+ * @drvinfo: ethool driver info structure
+ *
+ * Returns information about the driver and device for display to the user.
+ **/
+static void i40evf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, i40evf_driver_name, 32);
+ strlcpy(drvinfo->version, i40evf_driver_version, 32);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+/**
+ * i40evf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ **/
+static void i40evf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ ring->rx_max_pending = I40EVF_MAX_RXD;
+ ring->tx_max_pending = I40EVF_MAX_TXD;
+ ring->rx_pending = adapter->rx_desc_count;
+ ring->tx_pending = adapter->tx_desc_count;
+}
+
+/**
+ * i40evf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ **/
+static int i40evf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ u32 new_rx_count, new_tx_count;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40EVF_MIN_TXD,
+ I40EVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40EVF_MIN_RXD,
+ I40EVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_desc_count) &&
+ (new_rx_count == adapter->rx_desc_count))
+ return 0;
+
+ adapter->tx_desc_count = new_tx_count;
+ adapter->rx_desc_count = new_rx_count;
+
+ if (netif_running(netdev))
+ i40evf_reinit_locked(adapter);
+
+ return 0;
+}
+
+/**
+ * i40evf_get_coalesce - Get interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality.
+ **/
+static int i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
+
+ ec->tx_max_coalesced_frames = vsi->work_limit;
+ ec->rx_max_coalesced_frames = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->use_adaptive_rx_coalesce = 1;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
+ return 0;
+}
+
+/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_q_vector *q_vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ else
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ else if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+/**
+ * i40evf_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* We always hash on IP src and dest addresses */
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+
+ case TCP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->vsi_res->num_queue_pairs;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40evf_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
+ return 0;
+}
+
+/**
+ * i40evf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = indir[j++];
+ hlut_val |= indir[j++] << 8;
+ hlut_val |= indir[j++] << 16;
+ hlut_val |= indir[j++] << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops i40evf_ethtool_ops = {
+ .get_settings = i40evf_get_settings,
+ .get_drvinfo = i40evf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = i40evf_get_ringparam,
+ .set_ringparam = i40evf_set_ringparam,
+ .get_strings = i40evf_get_strings,
+ .get_ethtool_stats = i40evf_get_ethtool_stats,
+ .get_sset_count = i40evf_get_sset_count,
+ .get_msglevel = i40evf_get_msglevel,
+ .set_msglevel = i40evf_set_msglevel,
+ .get_coalesce = i40evf_get_coalesce,
+ .set_coalesce = i40evf_set_coalesce,
+ .get_rxnfc = i40evf_get_rxnfc,
+ .set_rxnfc = i40evf_set_rxnfc,
+ .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+ .get_rxfh = i40evf_get_rxfh,
+ .set_rxfh = i40evf_set_rxfh,
+ .get_channels = i40evf_get_channels,
+};
+
+/**
+ * i40evf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ **/
+void i40evf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &i40evf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
new file mode 100644
index 00000000000..7fc5f3b5d6b
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -0,0 +1,2475 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
+static int i40evf_close(struct net_device *netdev);
+
+char i40evf_driver_name[] = "i40evf";
+static const char i40evf_driver_string[] =
+ "Intel(R) XL710 X710 Virtual Function Network Driver";
+
+#define DRV_VERSION "0.9.34"
+const char i40evf_driver_version[] = DRV_VERSION;
+static const char i40evf_copyright[] =
+ "Copyright (c) 2013 - 2014 Intel Corporation.";
+
+/* i40evf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40evf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;
+
+ if (!mem || !mem->va)
+ return I40E_ERR_PARAM;
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ return 0;
+}
+
+/**
+ * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem, u32 size)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (mem->va)
+ return 0;
+ else
+ return I40E_ERR_NO_MEMORY;
+}
+
+/**
+ * i40evf_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem)
+{
+ if (!mem)
+ return I40E_ERR_PARAM;
+
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+
+ return 0;
+}
+
+/**
+ * i40evf_debug_d - OS dependent version of debug printing
+ * @hw: pointer to the HW structure
+ * @mask: debug level mask
+ * @fmt_str: printf-type format description
+ **/
+void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
+{
+ char buf[512];
+ va_list argptr;
+
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
+
+ va_start(argptr, fmt_str);
+ vsnprintf(buf, sizeof(buf), fmt_str, argptr);
+ va_end(argptr);
+
+ /* the debug string is already formatted with a newline */
+ pr_info("%s", buf);
+}
+
+/**
+ * i40evf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void i40evf_tx_timeout(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ adapter->tx_timeout_count++;
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
+ * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+ synchronize_irq(adapter->msix_entries[0].vector);
+}
+
+/**
+ * i40evf_misc_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+ wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void i40evf_irq_disable(struct i40evf_adapter *adapter)
+{
+ int i;
+ struct i40e_hw *hw = &adapter->hw;
+
+ if (!adapter->msix_entries)
+ return;
+
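+	/* vector 0 (admin queue) is masked separately by i40evf_misc_irq_disable */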
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
+ synchronize_irq(adapter->msix_entries[i].vector);
+ }
+ /* read flush */
+ rd32(hw, I40E_VFGEN_RSTAT);
+
+}
+
+/**
+ * i40evf_irq_enable_queues - Enable interrupt for specified queues
+ * @adapter: board private structure
+ * @mask: bitmap of queues to enable
+ **/
+void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << (i - 1))) {
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
+ I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ }
+ }
+}
+
+/**
+ * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
+ * @adapter: board private structure
+ * @mask: bitmap of vectors to trigger
+ **/
+static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
+ u32 mask)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+	u32 dyn_ctl;
+
+ for (i = 1; i < adapter->num_msix_vectors; i++) {
+ if (mask & (1 << i)) {
+ dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
+ dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+ }
+ }
+}
+
+/**
+ * i40evf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ i40evf_irq_enable_queues(adapter, ~0);
+
+ if (flush)
+ rd32(hw, I40E_VFGEN_RSTAT);
+}
+
+/**
+ * i40evf_msix_aq - Interrupt handler for vector 0
+ * @irq: interrupt number
+ * @data: pointer to netdev
+ **/
+static irqreturn_t i40evf_msix_aq(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 val;
+ u32 ena_mask;
+
+ /* handle non-queue interrupts */
+ val = rd32(hw, I40E_VFINT_ICR01);
+ ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+
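+	/* ack the misc vector by clearing its pending (PBA) bit */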
+ val = rd32(hw, I40E_VFINT_DYN_CTL01);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_VFINT_DYN_CTL01, val);
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
+ wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
+
+ /* schedule work on the private workqueue */
+ schedule_work(&adapter->adminq_task);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring && !q_vector->rx.ring)
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40evf_map_vector_to_rxq - associate irqs with rx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @r_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *rx_ring = adapter->rx_rings[r_idx];
+
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = q_vector->rx.ring;
+ rx_ring->vsi = &adapter->vsi;
+ q_vector->rx.ring = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+}
+
+/**
+ * i40evf_map_vector_to_txq - associate irqs with tx queues
+ * @adapter: board private structure
+ * @v_idx: interrupt number
+ * @t_idx: queue number
+ **/
+static void
+i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct i40e_ring *tx_ring = adapter->tx_rings[t_idx];
+
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = q_vector->tx.ring;
+ tx_ring->vsi = &adapter->vsi;
+ q_vector->tx.ring = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->num_ringpairs++;
+ q_vector->ring_mask |= (1 << t_idx);
+}
+
+/**
+ * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->vsi_res->num_queue_pairs;
+ int txr_remaining = adapter->vsi_res->num_queue_pairs;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ /* The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == (rxr_remaining * 2)) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /* If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ * Re-adjusting *qpv takes care of the remainder.
+ */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ i40evf_map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
+
+ return err;
+}
+
+/**
+ * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vectors for tx and rx handling, and requests
+ * interrupts from the kernel.
+ **/
+static int
+i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
+{
+ int vector, err, q_vectors;
+ int rx_int_idx = 0, tx_int_idx = 0;
+
+ i40evf_irq_disable(adapter);
+ /* Decrement for Other and TCP Timer vectors */
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "i40evf-%s-%s-%d", basename,
+ "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(
+ adapter->msix_entries[vector + NONQ_VECS].vector,
+ NULL);
+ free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
+ adapter->q_vector[vector]);
+ }
+ return err;
+}
+
+/**
+ * i40evf_request_misc_irq - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
+ * vector is only for the admin queue, and stays active even when the netdev
+ * is closed.
+ **/
+static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ sprintf(adapter->misc_vector_name, "i40evf:mbx");
+ err = request_irq(adapter->msix_entries[0].vector,
+ &i40evf_msix_aq, 0,
+ adapter->misc_vector_name, netdev);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "request_irq for %s failed: %d\n",
+ adapter->misc_vector_name, err);
+ free_irq(adapter->msix_entries[0].vector, netdev);
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_traffic_irqs - Free MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * Frees all MSI-X vectors other than 0.
+ **/
+static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
+{
+ int i;
+ int q_vectors;
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (i = 0; i < q_vectors; i++) {
+ irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
+ NULL);
+ free_irq(adapter->msix_entries[i+1].vector,
+ adapter->q_vector[i]);
+ }
+}
+
+/**
+ * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
+ * @adapter: board private structure
+ *
+ * Frees MSI-X vector 0.
+ **/
+static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ free_irq(adapter->msix_entries[0].vector, netdev);
+}
+
+/**
+ * i40evf_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_tx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i);
+}
+
+/**
+ * i40evf_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void i40evf_configure_rx(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i;
+ int rx_buf_len;
+
+
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
+ adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ rx_buf_len = I40E_RX_HDR_SIZE;
+ } else {
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = I40EVF_RXBUFFER_2048;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i);
+ adapter->rx_rings[i]->rx_buf_len = rx_buf_len;
+ }
+}
+
+/**
+ * i40evf_find_vlan - Search filter list for specific vlan filter
+ * @adapter: board private structure
+ * @vlan: vlan tag
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (vlan == f->vlan)
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_vlan - Add a vlan filter to the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+ return NULL;
+
+ f->vlan = vlan;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &adapter->vlan_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+
+ return f;
+}
+
+/**
+ * i40evf_del_vlan - Remove a vlan filter from the list
+ * @adapter: board private structure
+ * @vlan: VLAN tag
+ **/
+static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
+{
+ struct i40evf_vlan_filter *f;
+
+ f = i40evf_find_vlan(adapter, vlan);
+ if (f) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ }
+}
+
+/**
+ * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (i40evf_add_vlan(adapter, vid) == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
+ * @netdev: network device struct
+ * @vid: VLAN tag
+ **/
+static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ i40evf_del_vlan(adapter, vid);
+ return 0;
+}
+
+/**
+ * i40evf_find_filter - Search filter list for specific mac filter
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct
+i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_add_filter - Add a mac filter to the filter list
+ * @adapter: board private structure
+ * @macaddr: the MAC address
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+static struct
+i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
+ u8 *macaddr)
+{
+ struct i40evf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
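+	/* serialize list updates; filters may be added from more than one context */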
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+
+ f = i40evf_find_filter(adapter, macaddr);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f) {
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ return NULL;
+ }
+
+ ether_addr_copy(f->macaddr, macaddr);
+
+ list_add(&f->list, &adapter->mac_filter_list);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ }
+
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return f;
+}
+
+/**
+ * i40evf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40evf_mac_filter *f;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ f = i40evf_add_filter(adapter, addr->sa_data);
+ if (f) {
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ }
+
+ return (f == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_mac_filter *f, *ftmp;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ i40evf_add_filter(adapter, uca->addr);
+ }
+ netdev_for_each_mc_addr(mca, netdev) {
+ i40evf_add_filter(adapter, mca->addr);
+ }
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ mdelay(1);
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ bool found = false;
+
+ if (is_multicast_ether_addr(f->macaddr)) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (found) {
+ f->remove = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ }
+ }
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+}
+
+/**
+ * i40evf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ napi = &q_vector->napi;
+ napi_enable(napi);
+ }
+}
+
+/**
+ * i40evf_napi_disable_all - disable NAPI on all queue vectors
+ * @adapter: board private structure
+ **/
+static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
+{
+ int q_idx;
+ struct i40e_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+
+/**
+ * i40evf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void i40evf_configure(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ i40evf_set_rx_mode(netdev);
+
+ i40evf_configure_tx(adapter);
+ i40evf_configure_rx(adapter);
+ adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *ring = adapter->rx_rings[i];
+ i40evf_alloc_rx_buffers(ring, ring->count);
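+		/* post the freshly allocated buffers to hardware by advancing the ring tail */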
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, ring->tail);
+ }
+}
+
+/**
+ * i40evf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ **/
+static int i40evf_up_complete(struct i40evf_adapter *adapter)
+{
+ adapter->state = __I40EVF_RUNNING;
+ clear_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_napi_enable_all(adapter);
+
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ return 0;
+}
+
+/**
+ * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_rx_ring(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ i40evf_clean_tx_ring(adapter->tx_rings[i]);
+}
+
+/**
+ * i40evf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ **/
+void i40evf_down(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+
+ if (adapter->state == __I40EVF_DOWN)
+ return;
+
+ /* remove all MAC filters */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->remove = true;
+ }
+ /* remove all VLAN filters */
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ f->remove = true;
+ }
+ if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+ adapter->state != __I40EVF_RESETTING) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+ }
+ netif_tx_disable(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ i40evf_irq_disable(adapter);
+
+ i40evf_napi_disable_all(adapter);
+
+ netif_carrier_off(netdev);
+
+ i40evf_clean_all_tx_rings(adapter);
+ i40evf_clean_all_rx_rings(adapter);
+}
+
+/**
+ * i40evf_acquire_msix_vectors - Setup the MSIX capability
+ * @adapter: board private structure
+ * @vectors: number of vectors to request
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int
+i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 0) Other (Admin Queue and link, mostly)
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
+ if (err < 0) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+ }
+
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = err;
+ return 0;
+}
+
+/**
+ * i40evf_free_queues - Free memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * Free all of the memory associated with queue pairs.
+ **/
+static void i40evf_free_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->vsi_res)
+ return;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ if (adapter->tx_rings[i])
+ kfree_rcu(adapter->tx_rings[i], rcu);
+ adapter->tx_rings[i] = NULL;
+ adapter->rx_rings[i] = NULL;
+ }
+}
+
+/**
+ * i40evf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ struct i40e_ring *tx_ring;
+ struct i40e_ring *rx_ring;
+
+ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+ if (!tx_ring)
+ goto err_out;
+
+ tx_ring->queue_index = i;
+ tx_ring->netdev = adapter->netdev;
+ tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->count = adapter->tx_desc_count;
+ adapter->tx_rings[i] = tx_ring;
+
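+		/* the Rx ring is the second element of the two-ring allocation above */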
+ rx_ring = &tx_ring[1];
+ rx_ring->queue_index = i;
+ rx_ring->netdev = adapter->netdev;
+ rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->count = adapter->rx_desc_count;
+ adapter->rx_rings[i] = rx_ring;
+ }
+
+ return 0;
+
+err_out:
+ i40evf_free_queues(adapter);
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ int vector, v_budget;
+ int pairs = 0;
+ int err = 0;
+
+ if (!adapter->vsi_res) {
+ err = -EIO;
+ goto out;
+ }
+ pairs = adapter->vsi_res->num_queue_pairs;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+	 * than CPUs. So let's be conservative and only ask for
+	 * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+ v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
+
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ adapter->netdev->real_num_tx_queues = pairs;
+ return err;
+}
+
+/**
+ * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct i40e_q_vector *q_vector;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->vsi = &adapter->vsi;
+ q_vector->v_idx = q_idx;
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+ napi_vectors = adapter->vsi_res->num_queue_pairs;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * i40evf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
+{
+ int err;
+
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = i40evf_alloc_q_vectors(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queue vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = i40evf_alloc_queues(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u\n",
+ (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" :
+ "Disabled", adapter->vsi_res->num_queue_pairs);
+
+ return 0;
+err_alloc_queues:
+ i40evf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ i40evf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * i40evf_watchdog_timer - Periodic call-back timer
+ * @data: pointer to adapter disguised as unsigned long
+ **/
+static void i40evf_watchdog_timer(unsigned long data)
+{
+ struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;
+ schedule_work(&adapter->watchdog_task);
+ /* timer will be rescheduled in watchdog task */
+}
+
+/**
+ * i40evf_watchdog_task - Periodic call-back task
+ * @work: pointer to work_struct
+ **/
+static void i40evf_watchdog_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ watchdog_task);
+ struct i40e_hw *hw = &adapter->hw;
+
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto restart_watchdog;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+ /* A chance for redemption! */
+ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+ adapter->state = __I40EVF_STARTUP;
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ schedule_delayed_work(&adapter->init_task, 10);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ /* Don't reschedule the watchdog, since we've restarted
+ * the init task. When init_task contacts the PF and
+ * gets everything set up again, it'll restart the
+ * watchdog for us. Down, boy. Sit. Stay. Woof.
+ */
+ return;
+ }
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ goto watchdog_done;
+ }
+
+ if ((adapter->state < __I40EVF_DOWN) ||
+ (adapter->flags & I40EVF_FLAG_RESET_PENDING))
+ goto watchdog_done;
+
+ /* check for reset */
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
+ (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+ adapter->state = __I40EVF_RESETTING;
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
+ schedule_work(&adapter->reset_task);
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ goto watchdog_done;
+ }
+
+ /* Process admin queue tasks. After init, everything gets done
+ * here so we don't race on the admin queue.
+ */
+ if (adapter->aq_pending)
+ goto watchdog_done;
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
+ i40evf_map_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
+ i40evf_add_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
+ i40evf_add_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
+ i40evf_del_ether_addrs(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
+ i40evf_del_vlans(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
+ i40evf_disable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
+ i40evf_configure_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
+ i40evf_enable_queues(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->state == __I40EVF_RUNNING)
+ i40evf_request_stats(adapter);
+
+ i40evf_irq_enable(adapter, true);
+ i40evf_fire_sw_int(adapter, 0xFF);
+
+watchdog_done:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
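+	/* poll more often while admin queue requests are still outstanding */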
+ if (adapter->aq_required)
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + msecs_to_jiffies(20));
+ else
+ mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
+ schedule_work(&adapter->adminq_task);
+}
+
+/**
+ * next_queue - increment to next available tx queue
+ * @adapter: board private structure
+ * @j: queue counter
+ *
+ * Helper function for RSS programming to increment through available
+ * queues. Returns the next queue value.
+ **/
+static int next_queue(struct i40evf_adapter *adapter, int j)
+{
+ j += 1;
+
+ return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS if used
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = {
+ 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127,
+ 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0,
+ 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e,
+ 0x4954b126 };
+
+ /* Hash type is configured by the PF - we just supply the key */
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), seed[i]);
+
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ hena = I40E_DEFAULT_RSS_HENA;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ j = adapter->vsi_res->num_queue_pairs;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ j = next_queue(adapter, j);
+ lut = j;
+ j = next_queue(adapter, j);
+ lut |= j << 8;
+ j = next_queue(adapter, j);
+ lut |= j << 16;
+ j = next_queue(adapter, j);
+ lut |= j << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), lut);
+ }
+ i40e_flush(hw);
+}
+
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
+/**
+ * i40evf_reset_task - Call-back task to handle hardware reset
+ * @work: pointer to work_struct
+ *
+ * During reset we need to shut down and reinitialize the admin queue
+ * before we can use it to communicate with the PF again. We also clear
+ * and reinit the rings because that context is lost as well.
+ **/
+static void i40evf_reset_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ reset_task);
+ struct i40e_hw *hw = &adapter->hw;
+ int i = 0, err;
+	u32 rstat_val;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ udelay(500);
+
+ if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
+ dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+ i40evf_request_reset(adapter);
+ }
+
+ /* poll until we see the reset actually happen */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val != I40E_VFR_VFACTIVE)
+ break;
+ else
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
+ if (i == I40EVF_RESET_WAIT_COUNT) {
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ goto continue_reset; /* act like the reset happened */
+ }
+
+ /* wait until the reset is complete and the PF is responding to us */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val == I40E_VFR_VFACTIVE)
+ break;
+ else
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
+ if (i == I40EVF_RESET_WAIT_COUNT) {
+ /* reset never finished */
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
+ rstat_val);
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return; /* Do not attempt to reinit. It's dead, Jim. */
+ }
+
+continue_reset:
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
+ i40evf_down(adapter);
+ adapter->state = __I40EVF_RESETTING;
+
+ /* kill and reinit the admin queue */
+ if (i40evf_shutdown_adminq(hw))
+ dev_warn(&adapter->pdev->dev,
+ "%s: Failed to destroy the Admin Queue resources\n",
+ __func__);
+ err = i40evf_init_adminq(hw);
+ if (err)
+ dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n",
+ __func__, err);
+
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ i40evf_map_queues(adapter);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ mod_timer(&adapter->watchdog_timer, jiffies + 2);
+
+ if (netif_running(adapter->netdev)) {
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto reset_err;
+
+ i40evf_irq_enable(adapter, true);
+ }
+ return;
+reset_err:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+ i40evf_close(adapter->netdev);
+}
+
+/**
+ * i40evf_adminq_task - worker thread to clean the admin queue
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40evf_adminq_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter =
+ container_of(work, struct i40evf_adapter, adminq_task);
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ struct i40e_virtchnl_msg *v_msg;
+ i40e_status ret;
+ u32 val, oldval;
+ u16 pending;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
+ v_msg = (struct i40e_virtchnl_msg *)&event.desc;
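+	/* drain the ARQ, passing each event to the virtchnl completion handler */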
+ do {
+ ret = i40evf_clean_arq_element(hw, &event, &pending);
+ if (ret)
+ break; /* No event to process or error cleaning ARQ */
+
+ i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
+ v_msg->v_retval, event.msg_buf,
+ event.msg_size);
+ if (pending != 0) {
+ dev_info(&adapter->pdev->dev,
+ "%s: ARQ: Pending events %d\n",
+ __func__, pending);
+ memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
+ }
+ } while (pending);
+
+ /* check for error indications */
+ val = rd32(hw, hw->aq.arq.len);
+ oldval = val;
+ if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.arq.len, val);
+
+ val = rd32(hw, hw->aq.asq.len);
+ oldval = val;
+ if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.asq.len, val);
+
+ /* re-enable Admin queue interrupt cause */
+ i40evf_misc_irq_enable(adapter);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->tx_rings[i]->desc)
+ i40evf_free_tx_resources(adapter->tx_rings[i]);
+
+}
+
+/**
+ * i40evf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = adapter->tx_desc_count;
+ err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Tx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * i40evf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->count = adapter->rx_desc_count;
+ err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+ if (!err)
+ continue;
+ dev_err(&adapter->pdev->dev,
+ "%s: Allocation for Rx Queue %u failed\n",
+ __func__, i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++)
+ if (adapter->rx_rings[i]->desc)
+ i40evf_free_rx_resources(adapter->rx_rings[i]);
+}
+
+/**
+ * i40evf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int i40evf_open(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+ return -EIO;
+ }
+ if (adapter->state != __I40EVF_DOWN)
+ return -EBUSY;
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ /* clear any pending interrupts, may auto mask */
+ err = i40evf_request_traffic_irqs(adapter, netdev->name);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_req_irq;
+
+ i40evf_irq_enable(adapter, true);
+
+ return 0;
+
+err_req_irq:
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+err_setup_rx:
+ i40evf_free_all_rx_resources(adapter);
+err_setup_tx:
+ i40evf_free_all_tx_resources(adapter);
+
+ return err;
+}
+
+/**
+ * i40evf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
+ * are freed, along with all transmit and receive resources.
+ **/
+static int i40evf_close(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->state <= __I40EVF_DOWN)
+ return 0;
+
+
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+
+ i40evf_down(adapter);
+ adapter->state = __I40EVF_DOWN;
+ i40evf_free_traffic_irqs(adapter);
+
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+/**
+ * i40evf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * i40evf_reinit_locked - Software reinit
+ * @adapter: board private structure
+ *
+ * Reinitializes the ring structures in response to a software configuration
+ * change. Roughly the same as close followed by open, but skips releasing
+ * and reallocating the interrupts.
+ **/
+void i40evf_reinit_locked(struct i40evf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ WARN_ON(in_interrupt());
+
+ i40evf_down(adapter);
+
+ /* allocate transmit descriptors */
+ err = i40evf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ /* allocate receive descriptors */
+ err = i40evf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_configure(adapter);
+
+ err = i40evf_up_complete(adapter);
+ if (err)
+ goto err_reinit;
+
+ i40evf_irq_enable(adapter, true);
+ return;
+
+err_reinit:
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+ i40evf_close(netdev);
+}
+
+/**
+ * i40evf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+ i40evf_reinit_locked(adapter);
+ return 0;
+}
+
+static const struct net_device_ops i40evf_netdev_ops = {
+ .ndo_open = i40evf_open,
+ .ndo_stop = i40evf_close,
+ .ndo_start_xmit = i40evf_xmit_frame,
+ .ndo_get_stats = i40evf_get_stats,
+ .ndo_set_rx_mode = i40evf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40evf_set_mac,
+ .ndo_change_mtu = i40evf_change_mtu,
+ .ndo_tx_timeout = i40evf_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+};
+
+/**
+ * i40evf_check_reset_complete - check that VF reset is complete
+ * @hw: pointer to hw struct
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int i40evf_check_reset_complete(struct i40e_hw *hw)
+{
+ u32 rstat;
+ int i;
+
+ for (i = 0; i < 100; i++) {
+		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (rstat == I40E_VFR_VFACTIVE)
+ return 0;
+ udelay(10);
+ }
+ return -EBUSY;
+}
+
+/**
+ * i40evf_init_task - worker thread to perform delayed initialization
+ * @work: pointer to work_struct containing our data
+ *
+ * This task completes the work that was begun in probe. Due to the nature
+ * of VF-PF communications, we may need to wait tens of milliseconds to get
+ * responses back from the PF. Rather than busy-wait in probe and bog down the
+ * whole system, we'll do it in a task so we can sleep.
+ * This task only runs during driver init. Once we've established
+ * communications with the PF driver and set up our netdev, the watchdog
+ * takes over.
+ **/
+static void i40evf_init_task(struct work_struct *work)
+{
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ init_task.work);
+ struct net_device *netdev = adapter->netdev;
+ struct i40evf_mac_filter *f;
+ struct i40e_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int i, err, bufsz;
+
+ switch (adapter->state) {
+ case __I40EVF_STARTUP:
+ /* driver loaded, probe complete */
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+ err);
+ goto err;
+ }
+ err = i40evf_check_reset_complete(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
+ err);
+ goto err;
+ }
+ hw->aq.num_arq_entries = I40EVF_AQ_LEN;
+ hw->aq.num_asq_entries = I40EVF_AQ_LEN;
+ hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
+
+ err = i40evf_init_adminq(hw);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ err);
+ goto err;
+ }
+ err = i40evf_send_api_ver(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+ i40evf_shutdown_adminq(hw);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_VERSION_CHECK;
+ goto restart;
+ break;
+ case __I40EVF_INIT_VERSION_CHECK:
+ if (!i40evf_asq_done(hw)) {
+ dev_err(&pdev->dev, "Admin queue command never completed\n");
+ goto err;
+ }
+
+ /* aq msg sent, awaiting reply */
+ err = i40evf_verify_api_ver(adapter);
+ if (err) {
+ dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
+ err);
+ goto err;
+ }
+ err = i40evf_send_vf_config_msg(adapter);
+ if (err) {
+			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
+ err);
+ goto err;
+ }
+ adapter->state = __I40EVF_INIT_GET_RESOURCES;
+ goto restart;
+ break;
+ case __I40EVF_INIT_GET_RESOURCES:
+ /* aq msg sent, awaiting reply */
+ if (!adapter->vf_res) {
+ bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+ (I40E_MAX_VF_VSI *
+ sizeof(struct i40e_virtchnl_vsi_resource));
+ adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
+ if (!adapter->vf_res)
+ goto err;
+ }
+ err = i40evf_get_vf_config(adapter);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto restart;
+ if (err) {
+ dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+ err);
+ goto err_alloc;
+ }
+ adapter->state = __I40EVF_INIT_SW;
+ break;
+ default:
+ goto err_alloc;
+ }
+ /* got VF config message back from PF, now we can parse it */
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&pdev->dev, "No LAN VSI found\n");
+ goto err_alloc;
+ }
+
+ adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+
+ netdev->netdev_ops = &i40evf_netdev_ops;
+ i40evf_set_ethtool_ops(netdev);
+ netdev->watchdog_timeo = 5 * HZ;
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_GRO;
+
+ if (adapter->vf_res->vf_offload_flags
+ & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+ netdev->vlan_features = netdev->features;
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+ adapter->hw.mac.addr);
+ random_ether_addr(adapter->hw.mac.addr);
+ }
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
+
+ INIT_LIST_HEAD(&adapter->mac_filter_list);
+ INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+	if (!f)
+ goto err_sw_init;
+
+ ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
+ f->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+ list_add(&f->list, &adapter->mac_filter_list);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &i40evf_watchdog_timer;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+
+ adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
+ adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
+ err = i40evf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+ i40evf_map_rings_to_vectors(adapter);
+ i40evf_configure_rss(adapter);
+ err = i40evf_request_misc_irq(adapter);
+ if (err)
+ goto err_sw_init;
+
+ netif_carrier_off(netdev);
+
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+ adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ adapter->vsi.netdev = adapter->netdev;
+
+ if (!adapter->netdev_registered) {
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+ }
+
+ adapter->netdev_registered = true;
+
+ netif_tx_stop_all_queues(netdev);
+
+ dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
+ if (netdev->features & NETIF_F_GRO)
+ dev_info(&pdev->dev, "GRO is enabled\n");
+
+ dev_info(&pdev->dev, "%s\n", i40evf_driver_string);
+ adapter->state = __I40EVF_DOWN;
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_misc_irq_enable(adapter);
+ return;
+restart:
+ schedule_delayed_work(&adapter->init_task,
+ msecs_to_jiffies(50));
+ return;
+
+err_register:
+ i40evf_free_misc_irq(adapter);
+err_sw_init:
+ i40evf_reset_interrupt_capability(adapter);
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ /* Things went into the weeds, so try again later */
+ if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+ return; /* do not reschedule */
+ }
+ schedule_delayed_work(&adapter->init_task, HZ * 3);
+}
+
+/**
+ * i40evf_shutdown - Shutdown the device in preparation for a reboot
+ * @pdev: pci device structure
+ **/
+static void i40evf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ i40evf_close(netdev);
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+
+#endif
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40evf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40evf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * i40evf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct i40evf_adapter *adapter = NULL;
+ struct i40e_hw *hw = NULL;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
+ }
+ }
+
+ err = pci_request_regions(pdev, i40evf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter),
+ MAX_TX_QUEUES);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+
+ hw = &adapter->hw;
+ hw->back = adapter;
+
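+ /* enable all message levels below DEFAULT_DEBUG_LEVEL_SHIFT */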
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ adapter->state = __I40EVF_STARTUP;
+
+ /* Call save state here because it relies on the adapter struct. */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ INIT_WORK(&adapter->reset_task, i40evf_reset_task);
+ INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
+ INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
+ INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
+ schedule_delayed_work(&adapter->init_task, 10);
+
+ return 0;
+
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40evf_suspend - Power management suspend routine
+ * @pdev: PCI device information struct
+ * @state: unused
+ *
+ * Called when the system (VM) is entering sleep/suspend.
+ **/
+static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int retval = 0;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ i40evf_down(adapter);
+ rtnl_unlock();
+ }
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+/**
+ * i40evf_resume - Power management resume routine
+ * @pdev: PCI device information struct
+ *
+ * Called when the system (VM) is resumed from sleep/suspend.
+ **/
+static int i40evf_resume(struct pci_dev *pdev)
+{
+ struct i40evf_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state clears dev->state_saved so call
+ * pci_save_state to restore it.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
+ return err;
+ }
+ pci_set_master(pdev);
+
+ rtnl_lock();
+ err = i40evf_set_interrupt_capability(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
+ return err;
+ }
+ err = i40evf_request_misc_irq(adapter);
+ rtnl_unlock();
+ if (err) {
+ dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
+ return err;
+ }
+
+ schedule_work(&adapter->reset_task);
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+
+#endif /* CONFIG_PM */
+/**
+ * i40evf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * i40evf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40evf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+
+ cancel_delayed_work_sync(&adapter->init_task);
+ cancel_work_sync(&adapter->reset_task);
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+ adapter->state = __I40EVF_REMOVE;
+
+ if (adapter->msix_entries) {
+ i40evf_misc_irq_disable(adapter);
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ }
+
+ del_timer_sync(&adapter->watchdog_timer);
+ flush_scheduled_work();
+
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw);
+
+ iounmap(hw->hw_addr);
+ pci_release_regions(pdev);
+
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+
+ free_netdev(netdev);
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver i40evf_driver = {
+ .name = i40evf_driver_name,
+ .id_table = i40evf_pci_tbl,
+ .probe = i40evf_probe,
+ .remove = i40evf_remove,
+#ifdef CONFIG_PM
+ .suspend = i40evf_suspend,
+ .resume = i40evf_resume,
+#endif
+ .shutdown = i40evf_shutdown,
+};
+
+/**
+ * i40evf_init_module - Driver Registration Routine
+ *
+ * i40evf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40evf_init_module(void)
+{
+ int ret;
+
+ pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
+ i40evf_driver_version);
+
+ pr_info("%s\n", i40evf_copyright);
+
+ ret = pci_register_driver(&i40evf_driver);
+ return ret;
+}
+
+module_init(i40evf_init_module);
+
+/**
+ * i40evf_exit_module - Driver Exit Cleanup Routine
+ *
+ * i40evf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40evf_exit_module(void)
+{
+ pci_unregister_driver(&i40evf_driver);
+}
+
+module_exit(i40evf_exit_module);
+
+/* i40evf_main.c */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
new file mode 100644
index 00000000000..2dc0bac7671
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -0,0 +1,781 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40evf.h"
+#include "i40e_prototype.h"
+
+/* busy wait delay in msec */
+#define I40EVF_BUSY_WAIT_DELAY 10
+#define I40EVF_BUSY_WAIT_COUNT 50
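+/* worst-case total busy wait: I40EVF_BUSY_WAIT_COUNT * I40EVF_BUSY_WAIT_DELAY = 500 msec */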
+
+/**
+ * i40evf_send_pf_msg
+ * @adapter: adapter structure
+ * @op: virtual channel opcode
+ * @msg: pointer to message buffer
+ * @len: message length
+ *
+ * Send message to PF and print status if failure.
+ **/
+static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ i40e_status err;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return 0; /* nothing to see here, move along */
+
+ err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
+ op, err, hw->aq.asq_last_status);
+ return err;
+}
+
+/**
+ * i40evf_send_api_ver
+ * @adapter: adapter structure
+ *
+ * Send API version admin queue message to the PF. The reply is not checked
+ * in this function. Returns 0 if the message was successfully
+ * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info vvi;
+
+ vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
+ sizeof(vvi));
+}
+
+/**
+ * i40evf_verify_api_ver
+ * @adapter: adapter structure
+ *
+ * Compare API versions with the PF. Must be called after admin queue is
+ * initialized. Returns 0 if API versions match, -EIO if
+ * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ **/
+int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_version_info *pf_vvi;
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ i40e_status err;
+
+ event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
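+ /* the AQ descriptor carries the virtchnl opcode in cookie_high and
+ * the PF's return value in cookie_low
+ */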
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+ if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
+ (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
+
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_send_vf_config_msg
+ * @adapter: adapter structure
+ *
+ * Send VF configuration request admin queue message to the PF. The reply
+ * is not checked in this function. Returns 0 if the message was
+ * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+ **/
+int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
+{
+ return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
+}
+
+/**
+ * i40evf_get_vf_config
+ * @adapter: adapter structure
+ *
+ * Get VF configuration from PF and populate hw structure. Must be called after
+ * admin queue is initialized. Busy waits until response is received from PF,
+ * with maximum timeout. Response from PF is returned in the buffer for further
+ * processing by the caller.
+ **/
+int i40evf_get_vf_config(struct i40evf_adapter *adapter)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ struct i40e_arq_event_info event;
+ u16 len;
+ i40e_status err;
+
+ len = sizeof(struct i40e_virtchnl_vf_resource) +
+ I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
+ event.msg_size = len;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = i40evf_clean_arq_element(hw, &event, NULL);
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ goto out_alloc;
+
+ err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error returned from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+
+ if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid response from PF, %d, %d\n", __func__,
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low));
+ err = -EIO;
+ goto out_alloc;
+ }
+ memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));
+
+ i40e_vf_parse_hw_config(hw, adapter->vf_res);
+out_alloc:
+ kfree(event.msg_buf);
+out:
+ return err;
+}
+
+/**
+ * i40evf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ **/
+void i40evf_configure_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *vqci;
+ struct i40e_virtchnl_queue_pair_info *vqpi;
+ int pairs = adapter->vsi_res->num_queue_pairs;
+ int i, len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+ len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+ vqci = kzalloc(len, GFP_ATOMIC);
+ if (!vqci)
+ return;
+
+ vqci->vsi_id = adapter->vsi_res->vsi_id;
+ vqci->num_queue_pairs = pairs;
+ vqpi = vqci->qpair;
+ /* Size check is not needed here - HW max is 16 queue pairs, and we
+ * can fit info for 31 of them into the AQ buffer before it overflows.
+ */
+ for (i = 0; i < pairs; i++) {
+ vqpi->txq.vsi_id = vqci->vsi_id;
+ vqpi->txq.queue_id = i;
+ vqpi->txq.ring_len = adapter->tx_rings[i]->count;
+ vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+ vqpi->txq.headwb_enabled = 1;
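+ /* the head writeback area sits immediately after the descriptor ring */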
+ vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
+ (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
+
+ vqpi->rxq.vsi_id = vqci->vsi_id;
+ vqpi->rxq.queue_id = i;
+ vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
+ vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+ vqpi->rxq.max_pkt_size = adapter->netdev->mtu +
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+ vqpi++;
+ }
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ (u8 *)vqci, len);
+ kfree(vqci);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+}
+
+/**
+ * i40evf_enable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable all of our queues.
+ **/
+void i40evf_enable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
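+ /* bitmap selecting queues 0 through num_queue_pairs - 1 */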
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+}
+
+/**
+ * i40evf_disable_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable all of our queues.
+ **/
+void i40evf_disable_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
+ vqs.rx_queues = vqs.tx_queues;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ (u8 *)&vqs, sizeof(vqs));
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+}
+
+/**
+ * i40evf_map_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF map queues to interrupt vectors. Misc causes, including
+ * admin queue, are always mapped to vector 0.
+ **/
+void i40evf_map_queues(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_irq_map_info *vimi;
+ int v_idx, q_vectors, len;
+ struct i40e_q_vector *q_vector;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
+
+ q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+
+ len = sizeof(struct i40e_virtchnl_irq_map_info) +
+ (adapter->num_msix_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ vimi = kzalloc(len, GFP_ATOMIC);
+ if (!vimi)
+ return;
+
+ vimi->num_vectors = adapter->num_msix_vectors;
+ /* Queue vectors first */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
+ vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
+ vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+ }
+ /* Misc vector last - this is only for AdminQ messages */
+ vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
+ vimi->vecmap[v_idx].vector_id = 0;
+ vimi->vecmap[v_idx].txq_map = 0;
+ vimi->vecmap[v_idx].rxq_map = 0;
+
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ (u8 *)vimi, len);
+ kfree(vimi);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
+}
+
+/**
+ * i40evf_add_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more addresses to our filters.
+ **/
+void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ int len, i = 0, count = 0;
+ struct i40evf_mac_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal)
+ return;
+
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->add) {
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
+ i++;
+ f->add = false;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+}
+
+/**
+ * i40evf_del_ether_addrs
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more addresses from our filters.
+ **/
+void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_ether_addr_list *veal;
+ struct i40evf_mac_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
+
+ len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+ (count * sizeof(struct i40e_virtchnl_ether_addr));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_ether_addr_list)) /
+ sizeof(struct i40e_virtchnl_ether_addr);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ veal = kzalloc(len, GFP_ATOMIC);
+ if (!veal)
+ return;
+
+ veal->vsi_id = adapter->vsi_res->vsi_id;
+ veal->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ if (f->remove) {
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ (u8 *)veal, len);
+ kfree(veal);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+}
+
+/**
+ * i40evf_add_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF add one or more VLAN filters to our VSI.
+ **/
+void i40evf_add_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ int len, i = 0, count = 0;
+ struct i40evf_vlan_filter *f;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl)
+ return;
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->add) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ f->add = false;
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * i40evf_del_vlans
+ * @adapter: adapter structure
+ *
+ * Request that the PF remove one or more VLAN filters from our VSI.
+ **/
+void i40evf_del_vlans(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_vlan_filter_list *vvfl;
+ struct i40evf_vlan_filter *f, *ftmp;
+ int len, i = 0, count = 0;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->remove)
+ count++;
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+ (count * sizeof(u16));
+ if (len > I40EVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
+ __func__);
+ count = (I40EVF_MAX_AQ_BUF_SIZE -
+ sizeof(struct i40e_virtchnl_vlan_filter_list)) /
+ sizeof(u16);
+ len = I40EVF_MAX_AQ_BUF_SIZE;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl)
+ return;
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->remove) {
+ vvfl->vlan_id[i] = f->vlan;
+ i++;
+ list_del(&f->list);
+ kfree(f);
+ }
+ }
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+}
+
+/**
+ * i40evf_set_promiscuous
+ * @adapter: adapter structure
+ * @flags: bitmask to control unicast/multicast promiscuous.
+ *
+ * Request that the PF enable promiscuous mode for our VSI.
+ **/
+void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
+{
+ struct i40e_virtchnl_promisc_info vpi;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
+ __func__, adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
+ vpi.vsi_id = adapter->vsi_res->vsi_id;
+ vpi.flags = flags;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ (u8 *)&vpi, sizeof(vpi));
+}
+
+/**
+ * i40evf_request_stats
+ * @adapter: adapter structure
+ *
+ * Request VSI statistics from PF.
+ **/
+void i40evf_request_stats(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_queue_select vqs;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* no error message, this isn't crucial */
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
+ vqs.vsi_id = adapter->vsi_res->vsi_id;
+ /* queue maps are ignored for this message - only the vsi is used */
+ if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
+ (u8 *)&vqs, sizeof(vqs)))
+ /* if the request failed, don't lock out others */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+ /* Don't check CURRENT_OP - this is always higher priority */
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
+
+/**
+ * i40evf_virtchnl_completion
+ * @adapter: adapter structure
+ * @v_opcode: opcode sent by PF
+ * @v_retval: retval sent by PF
+ * @msg: message sent by PF
+ * @msglen: message length
+ *
+ * Asynchronous completion function for admin queue messages. Rather than busy
+ * wait, we fire off our requests and assume that no errors will be returned.
+ * This function handles the reply messages.
+ **/
+void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+ struct i40e_virtchnl_pf_event *vpe =
+ (struct i40e_virtchnl_pf_event *)msg;
+ switch (vpe->event) {
+ case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+ adapter->link_up =
+ vpe->event_data.link_event.link_status;
+ if (adapter->link_up && !netif_carrier_ok(netdev)) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else if (!adapter->link_up) {
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ break;
+ case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+ dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ schedule_work(&adapter->reset_task);
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Unknown event %d from pf\n",
+ __func__, vpe->event);
+ break;
+ }
+ return;
+ }
+ if (v_opcode != adapter->current_op) {
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
+ __func__, adapter->current_op, v_opcode);
+ /* We're probably completely screwed at this point, but clear
+ * the current op and try to carry on....
+ */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ return;
+ }
+ if (v_retval) {
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
+ __func__, v_retval, v_opcode);
+ }
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS: {
+ struct i40e_eth_stats *stats =
+ (struct i40e_eth_stats *)msg;
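+ /* fold per-class (unicast/multicast/broadcast) counters into
+ * netdev-style totals
+ */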
+ adapter->net_stats.rx_packets = stats->rx_unicast +
+ stats->rx_multicast +
+ stats->rx_broadcast;
+ adapter->net_stats.tx_packets = stats->tx_unicast +
+ stats->tx_multicast +
+ stats->tx_broadcast;
+ adapter->net_stats.rx_bytes = stats->rx_bytes;
+ adapter->net_stats.tx_bytes = stats->tx_bytes;
+ adapter->net_stats.tx_errors = stats->tx_errors;
+ adapter->net_stats.rx_dropped = stats->rx_discards;
+ adapter->net_stats.tx_dropped = stats->tx_discards;
+ adapter->current_stats = *stats;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
+ /* enable transmits */
+ i40evf_irq_enable(adapter, true);
+ netif_tx_start_all_queues(adapter->netdev);
+ netif_carrier_on(adapter->netdev);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
+ __func__, v_opcode);
+ break;
+ } /* switch v_opcode */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e285b..5bcb2de7593 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
# more details.
#
# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 47c2d10df82..ee74f9536b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_82575
* e1000_82576
@@ -74,11 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
- (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -113,6 +106,59 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
}
/**
+ * igb_check_for_link_media_swap - Check which M88E1112 interface linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ /* Check the copper medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check the other medium. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ } else {
+ ret_val = igb_check_for_link_82575(hw);
+ }
+
+ return ret_val;
+}
+
+/**
* igb_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
@@ -189,6 +235,29 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
else
phy->ops.get_cable_length = igb_get_cable_length_m88;
phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ igb_check_for_link_media_swap;
+ }
break;
case IGP03E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
@@ -365,6 +434,19 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
? igb_setup_copper_link_82575
: igb_setup_serdes_link_82575;
+ if (mac->type == e1000_82580) {
+ switch (hw->device_id) {
+ /* feature not supported on these id's */
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ break;
+ default:
+ hw->dev_spec._82575.mas_capable = true;
+ break;
+ }
+ }
return 0;
}
@@ -440,7 +522,7 @@ out:
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
u32 link_mode = 0;
@@ -922,7 +1004,6 @@ out:
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -946,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
 data &= ~E1000_82580_PM_SPD;
 }
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -966,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -991,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
}
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1094,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -1117,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = 0;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
if (hw->bus.func == 1)
@@ -1130,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
while (timeout) {
if (rd32(E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout)
@@ -1141,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_igp_3))
igb_phy_init_script_igp3(hw);
- return ret_val;
+ return 0;
}
/**
@@ -1183,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
+ &duplex);
/* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
@@ -1230,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
@@ -1325,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1350,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* set the completion timeout for interface */
ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
+ if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
hw_dbg("Masking off all interrupts\n");
wr32(E1000_IMC, 0xffffffff);
@@ -1361,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = rd32(E1000_CTRL);
@@ -1403,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
+ if ((hw->mac.type >= e1000_i210) &&
+ !(igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_pll_workaround_i210(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Initialize identification LED */
ret_val = igb_id_led_init(hw);
if (ret_val) {
@@ -1536,7 +1621,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 data;
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1590,7 +1675,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ hw_dbg("NVM Read Error\n");
return ret_val;
}
@@ -1603,7 +1688,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1839,7 +1924,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1867,7 +1952,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
wr32(E1000_RCTL, temp_rctl);
wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
wrfl();
- msleep(2);
+ usleep_range(2000, 3000);
/* Enable RX queues that were previously enabled and restore our
* previous state
@@ -1919,14 +2004,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
* 16ms to 55ms
*/
ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2155,7 +2240,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
@@ -2173,7 +2258,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
 /* Add delay to ensure DEV_RST has time to complete */
if (global_device_reset)
- msleep(5);
+ usleep_range(5000, 6000);
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
@@ -2219,7 +2304,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
{
u16 ret_val = 0;
- if (data < E1000_82580_RXPBS_TABLE_SIZE)
+ if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
ret_val = e1000_82580_rxpbs_table[data];
return ret_val;
@@ -2350,8 +2435,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
@@ -2361,8 +2445,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
@@ -2439,7 +2522,7 @@ out:
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
if (ret_val)
@@ -2473,7 +2556,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
**/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 ipcnfg, eeer;
if ((hw->mac.type < e1000_i350) ||
@@ -2507,7 +2589,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
rd32(E1000_EEER);
out:
- return ret_val;
+ return 0;
}
/**
@@ -2625,15 +2707,15 @@ static const u8 e1000_emc_therm_limit[4] = {
E1000_EMC_DIODE3_THERM_LIMIT
};
+#ifdef CONFIG_IGB_HWMON
/**
* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Updates the temperatures in mac.thermal_sensor_data
**/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2651,7 +2733,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2675,7 +2757,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
E1000_I2C_THERMAL_SENSOR_ADDR,
&data->sensor[i].temp);
}
- return status;
+ return 0;
}
/**
@@ -2685,9 +2767,8 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
* Sets the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2713,7 +2794,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2744,9 +2825,10 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
low_thresh_delta;
}
}
- return status;
+ return 0;
}
+#endif
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506b423..b407c55738f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,46 +1,42 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+ u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
@@ -68,16 +64,16 @@ extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -93,8 +89,7 @@ union e1000_adv_rx_desc {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 hdr_info; /* Split Head, buf len */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
@@ -231,6 +226,10 @@ struct e1000_adv_tx_context_desc {
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+#define E1000_DVMOLR_HIDEVLAN 0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
#define E1000_VLVF_ARRAY_SIZE 32
#define E1000_VLVF_VLANID_MASK 0x00000FFF
#define E1000_VLVF_POOLSEL_SHIFT 12
@@ -266,8 +265,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
s32 igb_set_eee_i350(struct e1000_hw *);
s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
#define E1000_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 978eca31ced..217f8138851 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -44,16 +40,21 @@
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
+
/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD 0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0x00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
/* Interrupt delay cancellation */
/* Driver loaded bit for FW */
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
/* packet buffer parity error detection enabled */
/* descriptor FIFO parity error detection enable */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
#define E1000_I2CCMD_REG_ADDR_SHIFT 16
#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
#define E1000_I2CCMD_OPCODE_READ 0x08000000
@@ -98,11 +100,11 @@
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -191,7 +193,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -205,6 +208,11 @@
*/
#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
#define E1000_PCS_CFG_PCS_EN 8
#define E1000_PCS_LCTL_FLV_LINK_UP 1
#define E1000_PCS_LCTL_FSV_100 2
@@ -298,39 +306,34 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* Timestamp in Rx buffer */
#define E1000_RXPBS_CFG_TS_EN 0x80000000
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -397,12 +400,12 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -458,7 +461,6 @@
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
-#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
@@ -524,14 +526,84 @@
#define E1000_TIMINCA_16NS_SHIFT 24
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
#define E1000_MDICNFG_PHY_MASK 0x03E00000
#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
/* PCI Express Control */
#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
@@ -932,8 +1004,7 @@
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
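The new TSINTR/TSAUXC/TS_SDP* defines describe how the time-sync unit can drive the software-definable pins. A rough stand-alone sketch of composing the values these defines suggest for putting configurable frequency clock 0 on SDP0 — the register names and exact write sequence are assumptions; the real pin setup lives in igb's PTP code:

    /* Compose TSSDP/TSAUXC values for routing freq clock 0 onto SDP0. */
    #include <stdio.h>
    #include <stdint.h>

    #define TS_SDP0_SEL_FC0	(2 << 6)  /* Freq clock 0 is output on SDP0. */
    #define TS_SDP0_EN	(1 << 8)  /* SDP0 is assigned to Tsync. */
    #define TSAUXC_EN_CLK0	(1 << 2)  /* Enable Configurable Freq Clock 0. */
    #define TSAUXC_ST0	(1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */

    int main(void)
    {
    	uint32_t tssdp  = TS_SDP0_SEL_FC0 | TS_SDP0_EN;
    	uint32_t tsauxc = TSAUXC_EN_CLK0 | TSAUXC_ST0;

    	printf("TSSDP  <- 0x%08x\n", tssdp);  /* select FC0, own the pin */
    	printf("TSAUXC <- 0x%08x\n", tsauxc); /* enable and start the clock */
    	return 0;
    }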
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 37a9c06a6c6..ce55ea5d750 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,29 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -321,15 +316,15 @@ struct e1000_host_mng_command_info {
#include "e1000_mbx.h"
struct e1000_mac_operations {
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
#ifdef CONFIG_IGB_HWMON
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -339,31 +334,31 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
#define E1000_MAX_SENSORS 3
@@ -533,6 +528,9 @@ struct e1000_dev_spec_82575 {
bool clear_semaphore_once;
struct e1000_sfp_flags eth_flags;
bool module_plugged;
+ u8 media_port;
+ bool media_changed;
+ bool mas_capable;
};
struct e1000_hw {
@@ -562,11 +560,14 @@ struct e1000_hw {
u8 revision_id;
};
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
/* These functions must be implemented by drivers */
-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
#endif /* _E1000_HW_H_ */
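The mac/phy/nvm operations structures above are classic ops tables: each MAC generation binds its own implementations once at init, and the rest of the driver dispatches through the pointers (note the 0-on-success convention this commit standardizes on). A small self-contained sketch of the pattern, with invented names:

    #include <stdio.h>

    typedef int s32;

    struct hw;
    struct nvm_ops {
    	s32  (*acquire)(struct hw *);
    	s32  (*read)(struct hw *, unsigned off, unsigned words, unsigned short *data);
    	void (*release)(struct hw *);
    };
    struct hw { struct nvm_ops nvm; };

    static s32 fake_acquire(struct hw *hw) { return 0; }
    static void fake_release(struct hw *hw) { }
    static s32 fake_read(struct hw *hw, unsigned off, unsigned n, unsigned short *d)
    {
    	while (n--) *d++ = 0xBABA;	/* pretend EEPROM data */
    	return 0;
    }

    int main(void)
    {
    	struct hw hw = { .nvm = { fake_acquire, fake_read, fake_release } };
    	unsigned short word = 0;

    	if (!hw.nvm.acquire(&hw)) {	/* 0 means success, as in the driver */
    		hw.nvm.read(&hw, 0, 1, &word);
    		hw.nvm.release(&hw);
    	}
    	printf("word 0 = 0x%04x\n", word);
    	return 0;
    }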
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 0c0393316a3..65d931669f8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_i210
* e1000_i211
@@ -35,6 +31,8 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -99,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
return -E1000_ERR_NVM;
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -111,7 +109,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -123,7 +121,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void igb_release_nvm_i210(struct e1000_hw *hw)
{
igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -141,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
while (i < timeout) {
@@ -186,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ while (igb_get_hw_semaphore_i210(hw))
; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -206,10 +204,10 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
**/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -219,7 +217,7 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_read_nvm_eerd(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -227,7 +225,7 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
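igb_read_nvm_srrd_i210() cannot hold the NVM semaphore for long stretches because firmware also needs it, so it splits a request into E1000_EERD_EEWR_MAX_COUNT-word bursts with an acquire/release around each. The loop shape, isolated into a runnable sketch with made-up sizes:

    #include <stdio.h>

    #define MAX_COUNT 512	/* illustrative burst limit */

    int main(void)
    {
    	unsigned words = 1200, i, count;

    	for (i = 0; i < words; i += MAX_COUNT) {
    		count = (words - i) > MAX_COUNT ? MAX_COUNT : (words - i);
    		/* acquire(); read(offset + i, count, ...); release(); */
    		printf("burst at word %u, %u words\n", i, count);
    	}
    	return 0;
    }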
@@ -252,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, k, eewr = 0;
u32 attempts = 100000;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -274,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
for (k = 0; k < attempts; k++) {
if (E1000_NVM_RW_REG_DONE &
rd32(E1000_SRWR)) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
}
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
@@ -306,10 +304,10 @@ out:
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
**/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -319,7 +317,7 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -327,7 +325,7 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -364,14 +362,14 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
if (word_address == address) {
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- hw_dbg("Read INVM Word 0x%02x = %x",
+ hw_dbg("Read INVM Word 0x%02x = %x\n",
address, *data);
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
}
- if (status != E1000_SUCCESS)
+ if (status)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
@@ -387,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
u16 words __always_unused, u16 *data)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
@@ -397,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
&data[1]);
ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
&data[2]);
- if (ret_val != E1000_SUCCESS)
+ if (ret_val)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_INIT_CTRL_4:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_1_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_0_2_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_ID_LED_SETTINGS:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = ID_LED_RESERVED_FFFF;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
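The lone added break closes an unintended fallthrough: without it, a successfully defaulted NVM_ID_LED_SETTINGS word would fall into the NVM_SUB_DEV_ID case and be overwritten by the subsystem device ID. A contrived stand-alone illustration of the hazard:

    #include <stdio.h>

    int main(void)
    {
    	int data = 0, sel = 1;

    	switch (sel) {
    	case 1:
    		data = 0x1234;	/* LED settings */
    		break;		/* without this, case 2 clobbers data */
    	case 2:
    		data = 0x5678;	/* subsystem device id */
    		break;
    	}
    	printf("data = 0x%04x\n", data);	/* 0x1234, not 0x5678 */
    	return 0;
    }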
@@ -487,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have first version location used */
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
version = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have odd version location
@@ -505,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
(i != 1))) {
version = (*next_record & E1000_INVM_VER_FIELD_TWO)
>> 13;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have even version location
@@ -514,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
- if (status == E1000_SUCCESS) {
+ if (!status) {
invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
>> E1000_INVM_MAJOR_SHIFT;
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -532,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have image type in first location used */
if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
invm_ver->invm_img_type = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have image type in first location used */
@@ -541,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
@@ -555,12 +554,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
@@ -590,9 +589,9 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
**/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;
@@ -601,12 +600,12 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
* EEPROM read fails
*/
ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("EEPROM read failed\n");
goto out;
}
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
@@ -624,7 +623,7 @@ s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
checksum = (u16) NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
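The checksum logic above relies on the e1000-family convention that the 16-bit NVM words up to and including the checksum word sum (mod 2^16) to NVM_SUM, 0xBABA, so the checksum word is written as NVM_SUM minus the running sum. Stand-alone sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define NVM_SUM 0xBABA

    int main(void)
    {
    	uint16_t words[4] = { 0x1111, 0x2222, 0x3333 };	/* last slot = checksum */
    	uint16_t sum = 0;
    	int i;

    	for (i = 0; i < 3; i++)
    		sum += words[i];
    	words[3] = (uint16_t)(NVM_SUM - sum);	/* what igb_update_nvm_checksum does */

    	for (sum = 0, i = 0; i < 4; i++)
    		sum += words[i];	/* wraps mod 2^16 by design */
    	printf("total = 0x%04x (expect 0xbaba)\n", sum);
    	return 0;
    }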
@@ -653,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
@@ -684,9 +683,9 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
**/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u32 flup;
ret_val = igb_pool_flash_update_done_i210(hw);
@@ -699,7 +698,7 @@ s32 igb_update_flash_i210(struct e1000_hw *hw)
wr32(E1000_EECD, flup);
ret_val = igb_pool_flash_update_done_i210(hw);
- if (ret_val == E1000_SUCCESS)
+ if (!ret_val)
hw_dbg("Flash update complete\n");
else
hw_dbg("Flash update time out\n");
@@ -752,7 +751,7 @@ out:
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
u8 dev_addr, u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
if (ret_val)
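__igb_access_xmdio_reg() reaches Clause-45 MMD registers through the standard Clause-22 indirection pair (MDIO registers 13/14: access control and address/data). A hedged sketch of the sequence with the register writes stubbed out — the FUNC_DATA bit value is an assumption; only the ordering matters:

    #include <stdio.h>
    #include <stdint.h>

    #define MMDAC_FUNC_DATA 0x4000	/* assumed "data, no post increment" bit */

    enum { MMDAC = 13, MMDAAD = 14 };	/* standard MDIO registers 13/14 */

    static void phy_write(int reg, uint16_t val) { printf("reg %d <- 0x%04x\n", reg, val); }
    static uint16_t phy_read(int reg) { return 0xbeef; }	/* stub */

    int main(void)
    {
    	uint8_t dev_addr = 3;		/* e.g. the PCS device */
    	uint16_t address = 0x0010, data;

    	phy_write(MMDAC, dev_addr);			/* address function */
    	phy_write(MMDAAD, address);			/* latch register number */
    	phy_write(MMDAC, MMDAC_FUNC_DATA | dev_addr);	/* switch to data function */
    	data = phy_read(MMDAAD);			/* the MMD register itself */
    	printf("read 0x%04x\n", data);
    	return 0;
    }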
@@ -835,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
}
return ret_val;
}
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an errata in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = rd32(E1000_WUC);
+ mdicnfg = rd32(E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ wr32(E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+ igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = 0;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+ ctrl = rd32(E1000_CTRL);
+ wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+
+ wr32(E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ usleep_range(1000, 2000);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ wr32(E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+ wr32(E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ wr32(E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
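A plausible call site for the new workaround, sketched under assumptions: it only applies to i210 parts, retries internally up to E1000_MAX_PLL_TRIES, and returns -E1000_ERR_PHY if the PLL never comes up configured, so a reset path would just gate on the MAC type and propagate the error. Whether the driver invokes it from exactly this spot is not shown in this hunk:

    /* Hypothetical caller, written in the driver's own style. */
    s32 example_reset_path(struct e1000_hw *hw)
    {
    	s32 ret_val = 0;

    	if (hw->mac.type == e1000_i210) {
    		ret_val = igb_pll_workaround_i210(hw);
    		if (ret_val)
    			return ret_val;	/* PLL never came up configured */
    	}
    	/* ... continue with the normal reset sequence ... */
    	return ret_val;
    }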
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index dde3c4b7ea9..3442b6357d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,53 +1,39 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
- u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -93,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298f0ed5067..2a88595f956 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -443,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
@@ -867,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
@@ -930,11 +925,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full;
- hw_dbg("Flow Control = FULL.\r\n");
+ hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = "
- "RX PAUSE frames only.\r\n");
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
@@ -949,7 +943,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
- hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+ hw_dbg("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
@@ -963,7 +957,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
* disabled. However, we want to consider that we could
@@ -989,10 +983,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
(hw->fc.requested_mode == e1000_fc_tx_pause) ||
(hw->fc.strict_ieee)) {
hw->fc.current_mode = e1000_fc_none;
- hw_dbg("Flow Control = NONE.\r\n");
+ hw_dbg("Flow Control = NONE.\n");
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+ hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
/* Now we need to do one last check... If we auto-
@@ -1267,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1300,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
+ switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
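The flow-control negotiation code above is walking the IEEE 802.3 pause resolution table over the local and link-partner PAUSE/ASM_DIR advertisement bits (the symmetric case is further split by the requested mode, which this sketch folds into "full"). The core table, reduced to runnable C:

    #include <stdio.h>
    #include <stdint.h>

    #define PAUSE	0x0400	/* bit 10 of the autoneg ability word */
    #define ASM_DIR	0x0800	/* bit 11 */

    static const char *resolve_fc(uint16_t local, uint16_t partner)
    {
    	if ((local & PAUSE) && (partner & PAUSE))
    		return "full";			/* both symmetric */
    	if (!(local & PAUSE) && (local & ASM_DIR) &&
    	    (partner & PAUSE) && (partner & ASM_DIR))
    		return "tx pause only";
    	if ((local & PAUSE) && (local & ASM_DIR) &&
    	    !(partner & PAUSE) && (partner & ASM_DIR))
    		return "rx pause only";
    	return "none";
    }

    int main(void)
    {
    	printf("%s\n", resolve_fc(PAUSE | ASM_DIR, ASM_DIR)); /* rx pause only */
    	return 0;
    }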
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83cc60..ea24961b0d7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
@@ -86,6 +82,6 @@ enum e1000_mng_mode {
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447fabf..162cc49345d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba41acf..d20af6b2f58 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index a7db7f3db91..e8280d0d7f0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,29 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -481,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
@@ -802,5 +798,4 @@ etrack_id:
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
}
- return;
}
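The word_out shuffle visible in the SPI write hunk isolates to a plain 16-bit byte swap — SPI EEPROMs expect the high byte first (in-kernel code would normally reach for swab16()). Stand-alone:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t swap16(uint16_t w)
    {
    	return (uint16_t)((w >> 8) | (w << 8));
    }

    int main(void)
    {
    	printf("0x%04x -> 0x%04x\n", 0x1234, swap16(0x1234));	/* 0x3412 */
    	return 0;
    }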
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 433b7419cb9..febc9cdb739 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
@@ -33,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index e7266759a10..c1bb64d8366 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -394,77 +390,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
}
/**
- * e1000_write_sfp_data_byte - Writes SFP module data.
- * @hw: pointer to the HW structure
- * @offset: byte location offset to write to
- * @data: data to write
- *
- * Writes one byte to SFP module data stored
- * in SFP resided EEPROM memory or SFP diagnostic area.
- * Function should be called with
- * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- * access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
- u32 i = 0;
- u32 i2ccmd = 0;
- u32 data_local = 0;
-
- if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
- hw_dbg("I2CCMD command address exceeds upper limit\n");
- return -E1000_ERR_PHY;
- }
- /* The programming interface is 16 bits wide
- * so we need to read the whole word first
- * then update appropriate byte lane and write
- * the updated word back.
- */
- /* Set up Op-code, EEPROM Address,in the I2CCMD
- * register. The MAC will take care of interfacing
- * with an EEPROM to write the data given.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_READ);
- /* Set a command to read single word */
- wr32(E1000_I2CCMD, i2ccmd);
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- udelay(50);
- /* Poll the ready bit to see if lastly
- * launched I2C operation completed
- */
- i2ccmd = rd32(E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY) {
- /* Check if this is READ or WRITE phase */
- if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
- E1000_I2CCMD_OPCODE_READ) {
- /* Write the selected byte
- * lane and update whole word
- */
- data_local = i2ccmd & 0xFF00;
- data_local |= data;
- i2ccmd = ((offset <<
- E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE | data_local);
- wr32(E1000_I2CCMD, i2ccmd);
- } else {
- break;
- }
- }
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- hw_dbg("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- hw_dbg("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
- return 0;
-}
-
-/**
* igb_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -708,11 +633,6 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
goto out;
}
- if (phy->type == e1000_phy_i210) {
- ret_val = igb_set_master_slave_mode(hw);
- if (ret_val)
- return ret_val;
- }
out:
return ret_val;
@@ -806,6 +726,9 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
hw_dbg("Error committing the PHY changes\n");
return ret_val;
}
+ ret_val = igb_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
return 0;
}
@@ -998,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = igb_wait_autoneg(hw);
if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
+ hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
@@ -1730,7 +1652,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ mdelay(usec_interval/1000);
+ else
+ udelay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
@@ -2279,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
void igb_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
@@ -2302,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
void igb_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
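Two timing changes recur in the e1000_phy.c hunks above: msleep(1) becomes usleep_range(1000, 2000), since jiffy-based msleep() can sleep far longer than a millisecond on low-HZ configurations, and the link-poll wait only busy-waits with udelay() when the interval is under a millisecond, deferring to mdelay() otherwise. Below is a minimal sketch of the latter pattern with a hypothetical example_interval_delay() helper; it is an illustration of the technique, not part of the patch.

#include <linux/delay.h>

/* Busy-wait primitives are meant for short delays; split anything of a
 * millisecond or more into mdelay() so udelay() is never asked to spin
 * for a very long time.
 */
static void example_interval_delay(unsigned int usec_interval)
{
	if (usec_interval >= 1000)
		mdelay(usec_interval / 1000);
	else
		udelay(usec_interval);
}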
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6a0873f2095..7af4ffab028 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
@@ -70,7 +66,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
s32 igb_get_phy_info_82580(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
@@ -156,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
#define GS40G_LINE_LB 0x4000
/* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6c53a..f5ba4e4eafb 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
@@ -41,6 +37,7 @@
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
@@ -69,6 +66,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
@@ -102,6 +100,14 @@
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
+#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
+#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
+#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
+#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
+#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
@@ -187,6 +193,10 @@
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
: (0x0E03C + ((_n) * 0x40)))
+
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -293,9 +303,9 @@
#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
+ (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -349,16 +359,29 @@
#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
+#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
-#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
-#define rd32(reg) (readl(hw->hw_addr + reg))
+struct e1000_hw;
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ if (!E1000_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igb_rd32(hw, reg))
+
#define wrfl() ((void)rd32(E1000_STATUS))
#define array_wr32(reg, offset, value) \
- (writel(value, hw->hw_addr + reg + ((offset) << 2)))
+ wr32((reg) + ((offset) << 2), (value))
+
#define array_rd32(reg, offset) \
(readl(hw->hw_addr + reg + ((offset) << 2)))
@@ -397,4 +420,6 @@
#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
+#define E1000_REMOVED(h) unlikely(!(h))
+
#endif
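The reworked wr32()/rd32() definitions in e1000_regs.h above take one ACCESS_ONCE() snapshot of hw->hw_addr and skip the MMIO access when E1000_REMOVED() reports the mapping gone, so register writes become no-ops after a surprise device removal instead of dereferencing a cleared pointer. Below is a minimal sketch of the same snapshot-then-check pattern; the names example_hw, EXAMPLE_REMOVED and example_wr32 are hypothetical and the code is an illustration, not part of the patch.

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/io.h>

struct example_hw {
	u8 __iomem *hw_addr;	/* cleared elsewhere once the device is gone */
};

#define EXAMPLE_REMOVED(addr) unlikely(!(addr))

static void example_wr32(struct example_hw *hw, u32 reg, u32 val)
{
	/* take a single snapshot so a concurrent removal cannot change
	 * the pointer between the check and the write
	 */
	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);

	if (!EXAMPLE_REMOVED(hw_addr))
		writel(val, &hw_addr[reg]);
}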
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 6807b098eda..06102d1f7c0 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,30 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -41,6 +36,8 @@
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
struct igb_adapter;
@@ -67,6 +64,7 @@ struct igb_adapter;
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
+#define MAX_MSIX_ENTRIES 10
/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
@@ -127,9 +125,9 @@ struct vf_data_storage {
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 4)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
+ (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -196,6 +194,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -239,7 +238,6 @@ struct igb_ring {
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
- unsigned long last_rx_timestamp;
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
@@ -337,8 +335,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif
@@ -355,7 +355,7 @@ struct igb_adapter {
unsigned int flags;
unsigned int num_q_vectors;
- struct msix_entry *msix_entries;
+ struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
@@ -430,8 +430,10 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
@@ -440,7 +442,7 @@ struct igb_adapter {
char fw_version[32];
#ifdef CONFIG_IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon_buff;
bool ets;
#endif
struct i2c_algo_bit_data i2c_algo;
@@ -450,6 +452,9 @@ struct igb_adapter {
u8 rss_indir_tbl[IGB_RETA_SIZE];
unsigned long link_check_timeout;
+ int copper_tries;
+ struct e1000_info ei;
+ u16 eee_advert;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -462,6 +467,17 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
+#define IGB_FLAG_MEDIA_RESET (1 << 10)
+#define IGB_FLAG_MAS_CAPABLE (1 << 11)
+#define IGB_FLAG_MAS_ENABLE (1 << 12)
+#define IGB_FLAG_HAS_MSIX (1 << 13)
+#define IGB_FLAG_EEE (1 << 14)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0X0001
+#define IGB_MAS_ENABLE_1 0X0002
+#define IGB_MAS_ENABLE_2 0X0004
+#define IGB_MAS_ENABLE_3 0X0008
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -473,7 +489,8 @@ struct igb_adapter {
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
- __IGB_DOWN
+ __IGB_DOWN,
+ __IGB_PTP_TX_IN_PROGRESS,
};
enum igb_boards {
@@ -483,59 +500,41 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
- struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
- struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
- unsigned char *va,
- struct sk_buff *skb);
-static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
- igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
-
- /* Update the last_rx_timestamp timer in order to enable watchdog check
- * for error case of latched timestamp on a dropped packet.
- */
- rx_ring->last_rx_timestamp = jiffies;
-}
-
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+ struct sk_buff *skb);
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
#ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 151e00cad11..c737d1f4083 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for igb */
@@ -145,7 +141,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
+ u32 speed;
+ status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
@@ -169,13 +167,22 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
} else {
ecmd->supported = (SUPPORTED_FIBRE |
+ SUPPORTED_1000baseKX_Full |
SUPPORTED_Autoneg |
SUPPORTED_Pause);
- ecmd->advertising = ADVERTISED_FIBRE;
-
- if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
- ecmd->supported |= SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ ecmd->advertising = (ADVERTISED_FIBRE |
+ ADVERTISED_1000baseKX_Full);
+ if (hw->mac.type == e1000_i354) {
+ if ((hw->device_id ==
+ E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->supported |= SUPPORTED_2500baseX_Full;
+ ecmd->supported &=
+ ~SUPPORTED_1000baseKX_Full;
+ ecmd->advertising |= ADVERTISED_2500baseX_Full;
+ ecmd->advertising &=
+ ~ADVERTISED_1000baseKX_Full;
+ }
}
if (eth_flags->e100_base_fx) {
ecmd->supported |= SUPPORTED_100baseT_Full;
@@ -187,41 +194,35 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
}
-
if (hw->mac.autoneg != 1)
ecmd->advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- if (hw->fc.requested_mode == e1000_fc_full)
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
ecmd->advertising |= ADVERTISED_Pause;
- else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ break;
+ case e1000_fc_rx_pause:
ecmd->advertising |= (ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
- else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ break;
+ case e1000_fc_tx_pause:
ecmd->advertising |= ADVERTISED_Asym_Pause;
- else
+ break;
+ default:
ecmd->advertising &= ~(ADVERTISED_Pause |
ADVERTISED_Asym_Pause);
-
- status = rd32(E1000_STATUS);
-
+ }
if (status & E1000_STATUS_LU) {
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->supported = SUPPORTED_2500baseX_Full;
- ecmd->advertising = ADVERTISED_2500baseX_Full;
- ecmd->speed = SPEED_2500;
- } else {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
- }
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ speed = SPEED_2500;
} else if (status & E1000_STATUS_SPEED_1000) {
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
} else if (status & E1000_STATUS_SPEED_100) {
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
} else {
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
@@ -229,10 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
-
+ ethtool_cmd_speed_set(ecmd, speed);
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
@@ -284,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -397,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -771,8 +772,10 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (hw->mac.type == e1000_i211)
+ if ((hw->mac.type >= e1000_i210) &&
+ !igb_get_flash_presence_i210(hw)) {
return -EOPNOTSUPP;
+ }
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
@@ -882,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1056,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1099,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1128,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1145,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
};
@@ -1166,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82575, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1192,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
@@ -1202,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1214,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
+
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1381,16 +1388,16 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
@@ -1408,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1455,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1477,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1499,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1511,10 +1518,10 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
free_irq(adapter->msix_entries[0].vector, adapter);
else
free_irq(irq, adapter);
@@ -1659,8 +1666,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
-
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1725,7 +1733,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
@@ -1943,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -1976,6 +1986,10 @@ static void igb_diag_test(struct net_device *netdev,
bool if_running = netif_running(netdev);
set_bit(__IGB_TESTING, &adapter->state);
+
+ /* can't do offline tests on media switching devices */
+ if (adapter->hw.dev_spec._82575.mas_capable)
+ eth_test->flags &= ~ETH_TEST_FL_OFFLINE;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -2055,14 +2069,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY;
wol->wolopts = 0;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
default:
@@ -2262,15 +2277,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i+1] = ring->tx_stats.bytes;
data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
data[i+2] += restart2;
i += IGB_TX_QUEUE_STATS_LEN;
@@ -2278,13 +2293,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i+1] = ring->rx_stats.bytes;
data[i+2] = ring->rx_stats.drops;
data[i+3] = ring->rx_stats.csum_err;
data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
@@ -2342,6 +2357,11 @@ static int igb_get_ts_info(struct net_device *dev,
{
struct igb_adapter *adapter = netdev_priv(dev);
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
switch (adapter->hw.mac.type) {
case e1000_82575:
info->so_timestamping =
@@ -2363,11 +2383,6 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- if (adapter->ptp_clock)
- info->phc_index = ptp_clock_index(adapter->ptp_clock);
- else
- info->phc_index = -1;
-
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -2402,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2414,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2576,7 +2595,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ipcnfg, eeer, ret_val;
+ u32 ret_val;
u16 phy_data;
if ((hw->mac.type < e1000_i350) ||
@@ -2585,16 +2604,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->supported = (SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full);
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
- ipcnfg = rd32(E1000_IPCNFG);
- eeer = rd32(E1000_EEER);
+ eeer = rd32(E1000_EEER);
- /* EEE status on negotiated link */
- if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
- edata->advertised = ADVERTISED_1000baseT_Full;
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
- if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
- edata->advertised |= ADVERTISED_100baseT_Full;
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
/* EEE Link Partner Advertised */
switch (hw->mac.type) {
@@ -2605,8 +2633,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -ENODATA;
edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
break;
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
@@ -2622,12 +2650,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
break;
}
- if (eeer & E1000_EEER_EEE_NEG)
- edata->eee_active = true;
-
edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
- if (eeer & E1000_EEER_TX_LPI_EN)
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
edata->tx_lpi_enabled = true;
/* Report correct negotiated EEE status for devices that
@@ -2675,9 +2701,10 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (eee_curr.advertised != edata->advertised) {
+ if (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
dev_err(&adapter->pdev->dev,
- "Setting EEE Advertisement is not supported\n");
+ "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
return -EINVAL;
}
@@ -2687,9 +2714,14 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
- igb_set_eee_i350(hw);
+ adapter->flags |= IGB_FLAG_EEE;
+ if (hw->mac.type == e1000_i350)
+ igb_set_eee_i350(hw);
+ else
+ igb_set_eee_i354(hw);
/* reset link */
if (netif_running(netdev))
@@ -2706,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -2716,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
/* Check whether we support SFF-8472 or not */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
@@ -2748,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 *dataword;
u16 first_word, last_word;
int i = 0;
@@ -2767,9 +2799,11 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS)
+ if (status) {
/* Error occurred while reading module */
+ kfree(dataword);
return -EIO;
+ }
be16_to_cpus(&dataword[i]);
}
@@ -2798,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
@@ -2844,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2877,6 +2912,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
return 0;
}
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int max_combined = 0;
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ max_combined = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ max_combined = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_combined = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ case e1000_i354:
+ default:
+ max_combined = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+ return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = igb_max_channels(adapter);
+
+ /* Report info for other vector */
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ unsigned int count = ch->combined_count;
+
+ /* Verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* Verify other_count is valid and has not been changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* Verify the number of channels doesn't exceed hw limits */
+ if (count > igb_max_channels(adapter))
+ return -EINVAL;
+
+ if (count != adapter->rss_queues) {
+ adapter->rss_queues = count;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match the new configuration.
+ */
+ return igb_reinit_queues(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2911,13 +3028,15 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
- .get_rxfh_indir = igb_get_rxfh_indir,
- .set_rxfh_indir = igb_set_rxfh_indir,
+ .get_rxfh = igb_get_rxfh,
+ .set_rxfh = igb_set_rxfh,
+ .get_channels = igb_get_channels,
+ .set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
void igb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ netdev->ethtool_ops = &igb_ethtool_ops;
}
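The igb_get_ethtool_stats() hunks above switch the per-ring counter reads from the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair to the _irq variants; the read side still loops until it obtains a snapshot no writer raced with. Below is a minimal sketch of that retry loop using a hypothetical example_ring_stats structure and example_read_stats() helper; it is an illustration of the API usage, not part of the patch.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read_stats(struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* retry until the sequence count is unchanged across the copy,
	 * i.e. no writer updated the counters while we were reading them
	 */
	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}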
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967ae..44b6a68f1af 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "igb.h"
#include "e1000_82575.h"
@@ -117,29 +113,29 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *igb_attr;
- n_attr = adapter->igb_hwmon_buff.n_hwmon;
- igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->igb_hwmon_buff->n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IGB_HWMON_TYPE_LOC:
igb_attr->dev_attr.show = igb_hwmon_show_location;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IGB_HWMON_TYPE_TEMP:
igb_attr->dev_attr.show = igb_hwmon_show_temp;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IGB_HWMON_TYPE_CAUTION:
igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IGB_HWMON_TYPE_MAX:
igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
snprintf(igb_attr->name, sizeof(igb_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -154,30 +150,16 @@ static int igb_add_hwmon_attr(struct igb_adapter *adapter,
igb_attr->dev_attr.attr.mode = S_IRUGO;
igb_attr->dev_attr.attr.name = igb_attr->name;
sysfs_attr_init(&igb_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &igb_attr->dev_attr);
- if (rc == 0)
- ++adapter->igb_hwmon_buff.n_hwmon;
- return rc;
+ adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+ ++adapter->igb_hwmon_buff->n_hwmon;
+
+ return 0;
}
static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->igb_hwmon_buff.hwmon_list);
-
- if (adapter->igb_hwmon_buff.device)
- hwmon_device_unregister(adapter->igb_hwmon_buff.device);
}
/* called from igb_main.c */
@@ -189,11 +171,11 @@ void igb_sysfs_exit(struct igb_adapter *adapter)
/* called from igb_main.c */
int igb_sysfs_init(struct igb_adapter *adapter)
{
- struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ struct hwmon_buff *igb_hwmon;
+ struct i2c_client *client;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
- struct i2c_client *client = NULL;
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -201,34 +183,16 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Don't create thermal hwmon interface if no sensors present */
rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
- if (rc)
- goto exit;
-
- /* init i2c_client */
- client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
- if (client == NULL) {
- dev_info(&adapter->pdev->dev,
- "Failed to create new i2c device..\n");
+ if (rc)
goto exit;
- }
- adapter->i2c_client = client;
- /* Allocation space for max attributes
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = E1000_MAX_SENSORS * 4;
- igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!igb_hwmon->hwmon_list) {
+ igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+ GFP_KERNEL);
+ if (!igb_hwmon) {
rc = -ENOMEM;
- goto err;
- }
-
- igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(igb_hwmon->device)) {
- rc = PTR_ERR(igb_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->igb_hwmon_buff = igb_hwmon;
for (i = 0; i < E1000_MAX_SENSORS; i++) {
@@ -240,11 +204,39 @@ int igb_sysfs_init(struct igb_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
- rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
+ }
+
+ /* init i2c_client */
+ client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+ if (client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device.\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+ adapter->i2c_client = client;
+
+ igb_hwmon->groups[0] = &igb_hwmon->group;
+ igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ client->name,
+ igb_hwmon,
+ igb_hwmon->groups);
+ if (IS_ERR(hwmon_dev)) {
+ rc = PTR_ERR(hwmon_dev);
+ goto err;
}
goto exit;
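
The hwmon rework above stops calling device_create_file() per attribute and instead collects everything into an attribute group that devm_hwmon_device_register_with_groups() registers and tears down automatically, which is why igb_sysfs_del_adapter() ends up empty. A rough sketch of that registration pattern, with placeholder names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/sysfs.h>

/* foo_attrs[] is filled in elsewhere, one slot per sensor attribute
 * (temp%u_input, temp%u_label, ...); the final slot stays NULL.
 */
static struct attribute *foo_attrs[16];
static struct attribute_group foo_group = { .attrs = foo_attrs };
static const struct attribute_group *foo_groups[] = { &foo_group, NULL };

static int foo_register_hwmon(struct device *dev, void *drvdata)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "foo",
							   drvdata,
							   foo_groups);
	/* devm_* means no explicit unregister or kfree on the remove path */
	return PTR_ERR_OR_ZERO(hwmon_dev);
}
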
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8cf44f2a8cc..a9537ba7a5a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -70,13 +66,13 @@ char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
- "Copyright (c) 2007-2013 Intel Corporation.";
+ "Copyright (c) 2007-2014 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
@@ -118,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -142,7 +137,7 @@ static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
@@ -160,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -173,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -182,6 +178,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif
#ifdef CONFIG_PM
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
+ pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
netdev->state, netdev->trans_start, netdev->last_rx);
}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ return !!(i2cctl & E1000_I2C_CLK_IN);
}
static const struct i2c_algo_bit_data igb_i2c_algo = {
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
-
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
+ /* Fall through */
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
+ /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -751,6 +743,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+ struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+ u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (E1000_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+	/* Reads should not return all F's. If this one did, and register 0
+	 * reads back as all F's too, the PCIe link is gone; detach the device.
+	 */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igb->netdev;
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -802,7 +816,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
- if (!adapter->msix_entries && msix_vector == 0)
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
@@ -982,43 +996,64 @@ err_out:
return err;
}
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
-{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
-}
-
/**
* igb_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vector.
**/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ adapter->q_vector[v_idx] = NULL;
+
+ /* igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * igb_reset_q_vector - Reset config for interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be reset
+ *
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ /* Coming from igb_set_interrupt_capability, the vectors are not yet
+ * allocated. So, q_vector is NULL so we should stop here.
+ */
+ if (!q_vector)
+ return;
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
- adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* igb_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ pci_disable_msix(adapter->pdev);
+ else if (adapter->flags & IGB_FLAG_HAS_MSI)
+ pci_disable_msi(adapter->pdev);
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
}
/**
@@ -1037,8 +1072,10 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
- while (v_idx--)
+ while (v_idx--) {
+ igb_reset_q_vector(adapter, v_idx);
igb_free_q_vector(adapter, v_idx);
+ }
}
/**
@@ -1069,6 +1106,7 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
if (!msix)
goto msi_only;
+ adapter->flags |= IGB_FLAG_HAS_MSIX;
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
@@ -1089,25 +1127,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
/* add 1 vector for link status interrupts */
numvecs++;
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
-
- if (!adapter->msix_entries)
- goto msi_only;
-
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- numvecs);
- if (err == 0)
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
return;
igb_reset_interrupt_capability(adapter);
/* If we can't do MSI-X, try MSI */
msi_only:
+ adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
/* disable SR-IOV for non MSI-X configurations */
if (adapter->vf_data) {
@@ -1171,7 +1205,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
@@ -1223,6 +1259,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
+ u64_stats_init(&ring->tx_syncp);
+ u64_stats_init(&ring->tx_syncp2);
+
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -1245,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
+ /* On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
@@ -1256,6 +1294,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
+ u64_stats_init(&ring->rx_syncp);
+
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
}
@@ -1296,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1364,7 +1405,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
int err = 0;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
err = igb_request_msix(adapter);
if (!err)
goto request_done;
@@ -1408,7 +1449,7 @@ request_done:
static void igb_free_irq(struct igb_adapter *adapter)
{
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int vector = 0, i;
free_irq(adapter->msix_entries[vector++].vector, adapter);
@@ -1433,8 +1474,9 @@ static void igb_irq_disable(struct igb_adapter *adapter)
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
+
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
regval = rd32(E1000_EIAC);
@@ -1444,8 +1486,9 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wr32(E1000_IAM, 0);
wr32(E1000_IMC, ~0);
wrfl();
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
+
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -1461,9 +1504,10 @@ static void igb_irq_enable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
+
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1601,6 +1645,73 @@ static void igb_power_down_link(struct igb_adapter *adapter)
}
/**
+ * igb_check_swap_media - Detect and switch function for Media Autosense
+ * @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ connsw = rd32(E1000_CONNSW);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 4) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ wr32(E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (!swap_now)
+ return;
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to fiber/serdes\n");
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ netdev_info(adapter->netdev,
+ "MAS: changing media to copper\n");
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ netdev_err(adapter->netdev,
+ "AMS: Invalid media type found, returning\n");
+ break;
+ }
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
* igb_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
**/
@@ -1617,7 +1728,7 @@ int igb_up(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
igb_configure_msix(adapter);
else
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1629,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -1639,6 +1751,10 @@ int igb_up(struct igb_adapter *adapter)
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (!hw->dev_spec._82575.eee_disable))
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
return 0;
}
@@ -1667,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
igb_irq_disable(adapter);
@@ -1707,12 +1823,43 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ * @adapter: adapter struct
+ **/
+static s32 igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+ s32 ret_val = 0;
+
+ connsw = rd32(E1000_CONNSW);
+	if (hw->phy.media_type != e1000_media_type_copper)
+ return ret_val;
+
+ /* configure for SerDes media detect */
+ if (!(connsw & E1000_CONNSW_SERDESD)) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ wr32(E1000_CONNSW, connsw);
+ wrfl();
+ } else if (connsw & E1000_CONNSW_SERDESD) {
+ /* already SerDes, no need to enable anything */
+ return ret_val;
+ } else {
+ netdev_info(adapter->netdev,
+ "MAS: Unable to configure feature, disabling..\n");
+ adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
+ }
+ return ret_val;
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -1809,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -1824,6 +1972,16 @@ void igb_reset(struct igb_adapter *adapter)
hw->mac.ops.reset_hw(hw);
wr32(E1000_WUC, 0);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ /* need to resetup here after media swap */
+ adapter->ei.get_invariants(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (igb_enable_mas(adapter))
+ dev_err(&pdev->dev,
+ "Error enabling Media Auto Sense\n");
+ }
if (hw->mac.ops.init_hw(hw))
dev_err(&pdev->dev, "Hardware Error\n");
@@ -1846,6 +2004,21 @@ void igb_reset(struct igb_adapter *adapter)
}
}
#endif
+ /* Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ igb_set_eee_i350(hw);
+ break;
+ case e1000_i354:
+ igb_set_eee_i354(hw);
+ break;
+ default:
+ break;
+ }
+ }
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -1911,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1966,7 +2139,58 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
break;
}
- return;
+}
+
+/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
+
+ hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3) {
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ netdev_info(adapter->netdev,
+ "MAS: Enabling Media Autosense for port %d\n",
+ hw->bus.func);
+ }
+ break;
+ default:
+ /* Shouldn't get here */
+ netdev_err(adapter->netdev,
+ "MAS: Invalid port configuration, returning\n");
+ break;
+ }
}
/**
@@ -1975,11 +2199,11 @@ void igb_set_fw_version(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
- return E1000_SUCCESS;
+ return 0;
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structue that implements
@@ -2016,7 +2240,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
@@ -2034,21 +2257,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
}
@@ -2079,11 +2296,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
-
err = -EIO;
- hw->hw_addr = ioremap(mmio_start, mmio_len);
+ hw->hw_addr = pci_iomap(pdev, 0, 0);
if (!hw->hw_addr)
goto err_ioremap;
@@ -2093,8 +2307,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
/* PCI config space info */
hw->vendor_id = pdev->vendor;
@@ -2219,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
+ /* configure RXPBSIZE and TXPBSIZE */
+ if (hw->mac.type == e1000_i210) {
+ wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+ wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ }
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2311,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
@@ -2350,6 +2571,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = false;
}
#endif
+ /* Check if Media Autosense is enabled */
+ adapter->ei = *ei;
+ if (hw->dev_spec._82575.mas_capable)
+ igb_init_mas(adapter);
+
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2382,26 +2608,39 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
dev_info(&pdev->dev,
"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
adapter->num_rx_queues, adapter->num_tx_queues);
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- igb_set_eee_i350(hw);
- break;
- case e1000_i354:
- if (hw->phy.media_type == e1000_media_type_copper) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = igb_set_eee_i350(hw);
+ if ((!err) &&
+ (!hw->dev_spec._82575.eee_disable)) {
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ adapter->flags |= IGB_FLAG_EEE;
+ }
+ break;
+ case e1000_i354:
if ((rd32(E1000_CTRL_EXT) &
- E1000_CTRL_EXT_LINK_MODE_SGMII))
- igb_set_eee_i354(hw);
+ E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = igb_set_eee_i354(hw);
+ if ((!err) &&
+ (!hw->dev_spec._82575.eee_disable)) {
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ adapter->flags |= IGB_FLAG_EEE;
+ }
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
}
-
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -2416,7 +2655,7 @@ err_eeprom:
iounmap(hw->flash_address);
err_sw_init:
igb_clear_interrupt_scheme(adapter);
- iounmap(hw->hw_addr);
+ pci_iounmap(pdev, hw->hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -2429,7 +2668,7 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_disable_sriov(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2470,27 +2709,19 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
- if (!adapter->msix_entries) {
+ if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
err = -EPERM;
goto out;
}
-
if (!num_vfs)
goto out;
- else if (old_vfs && old_vfs == num_vfs)
- goto out;
- else if (old_vfs && old_vfs != num_vfs)
- err = igb_disable_sriov(pdev);
- if (err)
- goto out;
-
- if (num_vfs > 7) {
- err = -EPERM;
- goto out;
- }
-
- adapter->vfs_allocated_count = num_vfs;
+ if (old_vfs) {
+ dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+ old_vfs, max_vfs);
+ adapter->vfs_allocated_count = old_vfs;
+ } else
+ adapter->vfs_allocated_count = num_vfs;
adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
sizeof(struct vf_data_storage), GFP_KERNEL);
@@ -2504,10 +2735,12 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
goto out;
}
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
-
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs) {
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -2589,7 +2822,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_disable_sriov(pdev);
#endif
- iounmap(hw->hw_addr);
+ pci_iounmap(pdev, hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
@@ -2623,7 +2856,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
return;
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_pci_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */
}
@@ -2847,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -3018,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
* Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -3159,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
- adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
@@ -3200,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
u32 vtctl = rd32(E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
@@ -3281,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -3341,6 +3577,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
vmolr = rd32(E1000_VMOLR(vfn));
vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (hw->mac.type == e1000_i350) {
+ u32 dvmolr;
+
+ dvmolr = rd32(E1000_DVMOLR(vfn));
+ dvmolr |= E1000_DVMOLR_STRVLAN;
+ wr32(E1000_DVMOLR(vfn), dvmolr);
+ }
if (aupe)
vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
else
@@ -3821,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = rd32(E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -3840,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
if (adapter->wvbr & (1 << j) ||
adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
dev_warn(&adapter->pdev->dev,
@@ -3941,6 +4185,7 @@ static void igb_watchdog_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
u32 link;
int i;
+ u32 connsw;
link = igb_has_link(adapter);
@@ -3951,20 +4196,35 @@ static void igb_watchdog_task(struct work_struct *work)
link = false;
}
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = rd32(E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
+
hw->mac.ops.get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
@@ -3974,6 +4234,15 @@ static void igb_watchdog_task(struct work_struct *work)
(ctrl & E1000_CTRL_RFCE) ? "RX" :
(ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
+ /* disable EEE if enabled */
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (adapter->link_duplex == HALF_DUPLEX)) {
+ dev_info(&adapter->pdev->dev,
+ "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+ adapter->hw.dev_spec._82575.eee_disable = true;
+ adapter->flags &= ~IGB_FLAG_EEE;
+ }
+
/* check if SmartSpeed worked */
igb_check_downshift(hw);
if (phy->speed_downgraded)
@@ -3981,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ E1000_THSTAT_LINK_THROTTLE))
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -4016,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ netdev_err(netdev, "The network adapter was stopped because it overheated\n");
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
@@ -4032,8 +4297,27 @@ static void igb_watchdog_task(struct work_struct *work)
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
@@ -4062,8 +4346,9 @@ static void igb_watchdog_task(struct work_struct *work)
}
/* Cause software interrupt to ensure Rx ring is cleaned */
- if (adapter->msix_entries) {
+ if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
@@ -4103,8 +4388,7 @@ enum latency_range {
* were determined based on theoretical maximum wire speed and testing
* data, in order to minimize response time while increasing bulk
* throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
+ * This functionality is controlled by ethtool's coalescing settings.
* NOTE: This function is called only when operating in a multiqueue
* receive environment.
**/
@@ -4178,8 +4462,7 @@ clear_counts:
* based on theoretical maximum wire speed and thresholds were set based
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
+ * This functionality is controlled by ethtool's coalescing settings.
* NOTE: These calculations are only valid when operating in a single-
* queue environment.
**/
@@ -4205,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
itrval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
itrval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -4327,6 +4609,7 @@ static int igb_tso(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
@@ -4334,16 +4617,14 @@ static int igb_tso(struct igb_ring *tx_ring,
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -4398,13 +4679,14 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
return;
} else {
u8 l4_hdr = 0;
+
switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -4685,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
*/
if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
unsigned short f;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
} else {
@@ -4702,12 +4985,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- if (!(adapter->ptp_tx_skb)) {
+ if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
@@ -4718,6 +5000,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
}
}
+ skb_tx_timestamp(skb);
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -4862,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -4915,8 +5199,10 @@ void igb_update_stats(struct igb_adapter *adapter,
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
+ u32 rqdpc = rd32(E1000_RQDPC(i));
+ if (hw->mac.type >= e1000_i210)
+ wr32(E1000_RQDPC(i), 0);
if (rqdpc) {
ring->rx_stats.drops += rqdpc;
@@ -4924,10 +5210,10 @@ void igb_update_stats(struct igb_adapter *adapter,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -4940,10 +5226,10 @@ void igb_update_stats(struct igb_adapter *adapter,
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -5341,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5392,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
for (i = 0; i < adapter->vfs_allocated_count; i++) {
u32 vmolr = rd32(E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5490,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
@@ -5518,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
@@ -5622,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
-
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
@@ -5708,7 +5998,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, 6);
+ memcpy(addr, vf_mac, ETH_ALEN);
igb_write_mbx(hw, msgbuf, 3, vf);
}
@@ -5983,7 +6273,7 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
}
if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
@@ -6214,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -6417,7 +6707,9 @@ static inline void igb_rx_hash(struct igb_ring *ring,
struct sk_buff *skb)
{
if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
/**
@@ -6487,7 +6779,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
@@ -6496,7 +6788,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
}
/* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
@@ -6510,7 +6802,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
/* record next protocol if header is present */
if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ } else if (protocol == htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
@@ -6674,11 +6966,14 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -6700,7 +6995,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
- do {
+ while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
/* return some buffers to hardware, one at a time is too slow */
@@ -6752,7 +7047,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* update budget accounting */
total_packets++;
- } while (likely(total_packets < budget));
+ }
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
@@ -6767,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return (total_packets < budget);
+ return total_packets < budget;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -6888,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -6911,13 +7206,29 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+ case SIOCGHWTSTAMP:
+ return igb_ptp_get_ts_config(netdev, ifr);
case SIOCSHWTSTAMP:
- return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+ return igb_ptp_set_ts_config(netdev, ifr);
default:
return -EOPNOTSUPP;
}
}
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@@ -7281,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
if (netif_running(netdev))
igb_close(netdev);
+ else
+ igb_reset(adapter);
igb_clear_interrupt_scheme(adapter);
@@ -7350,7 +7663,7 @@ static void igb_netpoll(struct net_device *netdev)
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (adapter->msix_entries)
+ if (adapter->flags & IGB_FLAG_HAS_MSIX)
wr32(E1000_EIMC, q_vector->eims_value);
else
igb_irq_disable(adapter);
@@ -7587,7 +7900,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
}
}
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -7596,15 +7910,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (hw->mac.type != e1000_82576)
return -EOPNOTSUPP;
+ if (min_tx_rate)
+ return -EINVAL;
+
actual_link_speed = igb_link_mbps(adapter->link_speed);
if ((vf >= adapter->vfs_allocated_count) ||
(!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
- (tx_rate < 0) || (tx_rate > actual_link_speed))
+ (max_tx_rate < 0) ||
+ (max_tx_rate > actual_link_speed))
return -EINVAL;
adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vf_data[vf].tx_rate = (u16)tx_rate;
- igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+ adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+ igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
return 0;
}
@@ -7633,7 +7951,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
- return E1000_SUCCESS;
+ return 0;
}
static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7644,7 +7962,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7669,11 +7988,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
@@ -7763,6 +8084,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
+
wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
wr32(E1000_DMACR, 0);
}
@@ -7791,8 +8113,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
swfw_mask = E1000_SWFW_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -7802,7 +8123,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_ERR_I2C;
else {
*data = status;
- return E1000_SUCCESS;
+ return 0;
}
}
@@ -7827,7 +8148,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (!this_client)
return E1000_ERR_I2C;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -7835,7 +8156,29 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (status)
return E1000_ERR_I2C;
else
- return E1000_SUCCESS;
+ return 0;
+
+}
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_reset_interrupt_capability(adapter);
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igb_open(netdev);
+
+ return err;
}
/* igb_main.c */
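For context, igb_reinit_queues() added above tears down and rebuilds the queue/interrupt layout around a close/open cycle. A plausible caller is an ethtool channel-count change path; the sketch below is an assumption for illustration, not part of this patch, and the function and field names (igb_set_channels_sketch, adapter->rss_queues) are invented for the example.

	/* Illustrative sketch only: adjust the requested combined queue
	 * count, then let igb_reinit_queues() re-plan MSI-X vectors and
	 * reopen the device if it was running. */
	static int igb_set_channels_sketch(struct net_device *netdev,
					   struct ethtool_channels *ch)
	{
		struct igb_adapter *adapter = netdev_priv(netdev);

		/* only combined queues are supported in this sketch */
		if (ch->rx_count || ch->tx_count)
			return -EINVAL;

		adapter->rss_queues = ch->combined_count;

		return igb_reinit_queues(adapter);
	}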
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5a54e3dc535..794c139f0cc 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -12,9 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/device.h>
@@ -75,6 +74,8 @@
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+
/* SYSTIM read access for the 82576 */
static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
@@ -359,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
**/
-void igb_ptp_tx_work(struct work_struct *work)
+static void igb_ptp_tx_work(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
ptp_tx_work);
@@ -386,8 +387,9 @@ void igb_ptp_tx_work(struct work_struct *work)
IGB_PTP_TX_TIMEOUT)) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
- dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
return;
}
@@ -425,10 +427,8 @@ static void igb_ptp_overflow_check(struct work_struct *work)
void igb_ptp_rx_hang(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct igb_ring *rx_ring;
u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
unsigned long rx_event;
- int n;
if (hw->mac.type != e1000_82576)
return;
@@ -443,18 +443,15 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
/* Determine the most recent watchdog or rx_timestamp event */
rx_event = adapter->last_rx_ptp_check;
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
- }
+ if (time_after(adapter->last_rx_timestamp, rx_event))
+ rx_event = adapter->last_rx_timestamp;
/* Only need to read the high RXSTMP register to clear the lock */
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
rd32(E1000_RXSTMPH);
adapter->last_rx_ptp_check = jiffies;
adapter->rx_hwtstamp_cleared++;
- dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
}
}
@@ -466,7 +463,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
**/
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
@@ -479,6 +476,7 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
@@ -537,13 +535,35 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
}
/**
- * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * igb_ptp_get_ts_config - get hardware time stamping config
* @netdev:
* @ifreq:
- * @cmd:
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -556,13 +576,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
- **/
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+ struct hwtstamp_config *config)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -570,14 +588,11 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -586,7 +601,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
@@ -610,7 +625,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
@@ -621,12 +636,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
*/
if (hw->mac.type != e1000_82576) {
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
/* fall through */
default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -643,7 +658,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
is_l2 = true;
is_l4 = true;
@@ -707,6 +722,32 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH);
+ return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev:
+ * @ifreq:
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igb_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
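From user space the new get/set pair maps onto the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls. A minimal sketch of a caller follows; the interface name, filter choice, and the assumption that the caller already has an open socket fd are illustrative, not taken from this patch.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Sketch: enable Tx timestamping and PTP v2 Rx filtering on "eth0",
	 * then read back the shadow copy the driver keeps in tstamp_config. */
	static int enable_hwtstamp(int sock)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
			return -1;

		/* SIOCGHWTSTAMP returns what the driver actually programmed */
		return ioctl(sock, SIOCGHWTSTAMP, &ifr);
	}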
@@ -727,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
adapter->cc.mask = CLOCKSOURCE_MASK(64);
adapter->cc.mult = 1;
@@ -747,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -766,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
adapter->ptp_caps.settime = igb_ptp_settime_i210;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
/* Enable the timer functions by clearing bit 31. */
wr32(E1000_TSAUXC, 0x0);
break;
@@ -798,10 +839,13 @@ void igb_ptp_init(struct igb_adapter *adapter)
/* Initialize the time sync interrupts for devices that support it. */
if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
}
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -841,6 +885,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
if (adapter->ptp_clock) {
@@ -864,6 +909,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_PTP))
return;
+ /* reset the tstamp_config */
+ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+
switch (adapter->hw.mac.type) {
case e1000_82576:
/* Dial the nominal frequency. */
@@ -876,7 +924,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
case e1000_i211:
/* Enable the timer functions and interrupts. */
wr32(E1000_TSAUXC, 0x0);
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
+ wr32(E1000_TSIM, TSYNC_INTERRUPTS);
wr32(E1000_IMS, E1000_IMS_TS);
break;
default:
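The clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, ...) calls added in this file pair with a lock taken on the transmit side. The transmit path is not shown in this diff, so the following is a hedged sketch of that side, assuming the usual one-outstanding-timestamp pattern.

	/* Transmit-side sketch (assumed, not shown in this hunk): only one
	 * outstanding Tx timestamp request is allowed, so the state bit acts
	 * as a single-slot lock around adapter->ptp_tx_skb. */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		adapter->ptp_tx_skb = skb_get(skb);
		adapter->ptp_tx_start = jiffies;
		/* ...request the hardware timestamp for this descriptor... */
	}
	/* the bit is released via clear_bit_unlock() once the timestamp is
	 * delivered (igb_ptp_tx_hwtstamp) or times out (igb_ptp_tx_work) */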
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07943f..2178f87e9f6 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
static void igbvf_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- return;
}
static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+ netdev->ethtool_ops = &igbvf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a1463e3d14c..7d6a25c8f88 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -312,17 +312,17 @@ enum igbvf_state_t {
extern char igbvf_driver_name[];
extern const char igbvf_driver_version[];
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 93eb7ee06d3..d608599e123 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
for (i = 0; i < 3; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries, 3);
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries, 3, 3);
}
- if (err) {
+ if (err < 0) {
/* MSI-X failed */
dev_err(&adapter->pdev->dev,
"Failed to initialize MSI-X interrupts.\n");
@@ -1745,7 +1745,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
- if (memcmp(addr->sa_data, hw->mac.addr, 6))
+ if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -1910,20 +1910,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
struct e1000_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
struct igbvf_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
u32 mss_l4len_idx, l4len;
+ unsigned int i;
+ int err;
+
*hdr_len = 0;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "igbvf_tso returning an error\n");
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0) {
+ dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+ return err;
}
l4len = tcp_hdrlen(skb);
@@ -2014,12 +2012,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
@@ -2343,10 +2341,9 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
struct igbvf_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
+ if (new_mtu < 68 || new_mtu > INT_MAX - ETH_HLEN - ETH_FCS_LEN ||
+ max_frame > MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- }
#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
@@ -2638,21 +2635,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
}
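dma_set_mask_and_coherent() folds the separate streaming and coherent mask calls into one, which is what lets the nested error handling above collapse. The general probe-time pattern looks like this (a generic sketch, not driver-specific code):

	/* Generic sketch of probe-time DMA mask negotiation. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;		/* 64-bit DMA available */
	} else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		pci_using_dac = 0;		/* fall back to 32-bit DMA */
	} else {
		dev_err(&pdev->dev, "No usable DMA configuration\n");
		return -EIO;
	}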
@@ -2699,7 +2690,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ei->get_variants) {
err = ei->get_variants(adapter);
if (err)
- goto err_ioremap;
+ goto err_get_variants;
}
/* setup adapter struct */
@@ -2796,6 +2787,7 @@ err_hw_init:
kfree(adapter->rx_ring);
err_sw_init:
igbvf_reset_interrupt_capability(adapter);
+err_get_variants:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index eea0e10ce12..955ad8c2c53 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
- memcpy(hw->mac.perm_addr, addr, 6);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
else
ret_val = -E1000_ERR_MAC_INIT;
}
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
memset(msgbuf, 0, 12);
msgbuf[0] = E1000_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
index 4d2ae97ff1b..1180cd59b57 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb.h
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <asm/byteorder.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -187,21 +186,21 @@ enum ixgb_state_t {
};
/* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2f8e3..b311e9e710d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
void ixgb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+ netdev->ethtool_ops = &ixgb_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 2a99a35c33a..0bd5d72e1af 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
};
/* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
- u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, u32 pad);
/* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
- u32 offset,
- u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 9f6b236828e..60801273915 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
+ pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
+ pr_err("No usable DMA configuration, aborting\n");
+ goto err_dma_mask;
}
}
@@ -1226,17 +1220,15 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
unsigned int i;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
u16 ipcse, tucse, mss;
- int err;
if (likely(skb_is_gso(skb))) {
struct ixgb_buffer *buffer_info;
struct iphdr *iph;
+ int err;
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
@@ -1527,12 +1519,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int tso;
if (test_bit(__IXGB_DOWN, &adapter->flags)) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (skb->len <= 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1549,7 +1541,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgb_tso(adapter, skb);
if (tso < 0) {
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0ac6b11c6e4..ac9f2148cdc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -55,7 +56,7 @@
#include <net/busy_poll.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
#undef pr_fmt
@@ -67,7 +68,11 @@
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64
+#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD 512
+#else
+#define IXGBE_DEFAULT_RXD 128
+#endif
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64
@@ -150,7 +155,6 @@ struct vf_data_storage {
struct vf_macvlans {
struct list_head l;
int vf;
- int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
@@ -187,11 +191,11 @@ struct ixgbe_rx_buffer {
struct ixgbe_queue_stats {
u64 packets;
u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
u64 yields;
u64 misses;
u64 cleaned;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
};
struct ixgbe_tx_queue_stats {
@@ -219,6 +223,15 @@ enum ixgbe_ring_state_t {
__IXGBE_RX_FCOE,
};
+struct ixgbe_fwd_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct net_device *netdev;
+ struct ixgbe_adapter *real_adapter;
+ unsigned int tx_base_queue;
+ unsigned int rx_base_queue;
+ int pool;
+};
+
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
@@ -236,12 +249,12 @@ struct ixgbe_ring {
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
struct device *dev; /* device for DMA mapping */
+ struct ixgbe_fwd_adapter *l2_accel_priv;
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
- unsigned long last_rx_timestamp;
unsigned long state;
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -293,6 +306,12 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_MAX_L2A_QUEUES 4
+#define IXGBE_BAD_L2A_QUEUE 3
+#define IXGBE_MAX_MACVLANS 31
+#define IXGBE_MAX_DCBMACVLANS 8
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -343,7 +362,7 @@ struct ixgbe_ring_container {
for (pos = (head).ring; pos != NULL; pos = pos->next)
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- ? 8 : 1)
+ ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
/* MAX_Q_VECTORS of these are allocated,
@@ -369,11 +388,13 @@ struct ixgbe_q_vector {
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD 4 /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD 8 /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
@@ -394,18 +415,19 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = true;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBE_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->tx.ring->stats.yields++;
#endif
- } else
+ } else {
/* we don't care if someone yielded */
q_vector->state = IXGBE_QV_STATE_NAPI;
- spin_unlock(&q_vector->lock);
+ }
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -413,14 +435,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
int rc = false;
- spin_lock(&q_vector->lock);
+ spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
IXGBE_QV_STATE_NAPI_YIELD));
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
- spin_unlock(&q_vector->lock);
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -432,12 +455,13 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
if ((q_vector->state & IXGBE_QV_LOCKED)) {
q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
q_vector->rx.ring->stats.yields++;
#endif
- } else
+ } else {
/* preserve yield marks */
q_vector->state |= IXGBE_QV_STATE_POLL;
+ }
spin_unlock_bh(&q_vector->lock);
return rc;
}
@@ -451,17 +475,32 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
rc = true;
- q_vector->state = IXGBE_QV_STATE_IDLE;
+ /* will reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBE_QV_STATE_DISABLED;
spin_unlock_bh(&q_vector->lock);
return rc;
}
/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
- WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+ WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBE_QV_OWNED)
+ rc = false;
+ q_vector->state |= IXGBE_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+
+ return rc;
+}
+
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@@ -487,10 +526,16 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
return false;
}
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+ return true;
+}
+
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
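The new IXGBE_QV_STATE_DISABLED state and ixgbe_qv_disable() give the teardown path a way to fence out busy-poll users before NAPI is stopped: unlock paths now clear only the non-disabled bits, so once DISABLED is latched no new owner can take the vector. A hedged sketch of how a napi-disable loop might consume it (the caller shown is an assumption, not part of this hunk):

	/* Sketch only: spin until no NAPI or busy-poll owner holds the vector,
	 * leaving IXGBE_QV_STATE_DISABLED latched so new owners are refused. */
	static void ixgbe_napi_disable_all_sketch(struct ixgbe_adapter *adapter)
	{
		int q_idx;

		for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
			napi_disable(&adapter->q_vector[q_idx]->napi);
			while (!ixgbe_qv_disable(adapter->q_vector[q_idx]))
				usleep_range(1000, 20000);
		}
	}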
@@ -508,8 +553,10 @@ struct hwmon_attr {
};
struct hwmon_buff {
- struct device *device;
- struct hwmon_attr *hwmon_list;
+ struct attribute_group group;
+ const struct attribute_group *groups[2];
+ struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
+ struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
@@ -539,6 +586,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
+static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
+
#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
@@ -560,6 +612,15 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
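The state field of the new ixgbe_mac_addr entry is a small bitmask that lets a software copy of the RAR table distinguish the permanent address (DEFAULT), entries changed since the last hardware sync (MODIFIED), and slots currently programmed (IN_USE). A hedged sketch of a lookup over such a table (the helper name and walk are assumptions for illustration):

	/* Sketch: find a free software RAR slot, skipping entries in use. */
	static int ixgbe_find_free_mac_slot(struct ixgbe_adapter *adapter)
	{
		int i;

		for (i = 0; i < adapter->hw.mac.num_rar_entries; i++) {
			if (!(adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE))
				return i;
		}
		return -ENOMEM;
	}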
@@ -696,6 +757,7 @@ struct ixgbe_adapter {
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 wol;
u16 bd_number;
@@ -711,9 +773,11 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
@@ -729,15 +793,17 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
+ struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
- struct hwmon_buff ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/
u8 default_up;
+ unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
};
struct ixgbe_fdir_filter {
@@ -751,9 +817,13 @@ enum ixgbe_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
+ __IXGBE_DISABLED,
+ __IXGBE_REMOVING,
__IXGBE_SERVICE_SCHED,
+ __IXGBE_SERVICE_INITED,
__IXGBE_IN_SFP_INIT,
__IXGBE_PTP_RUNNING,
+ __IXGBE_PTP_TX_IN_PROGRESS,
};
struct ixgbe_cb {
@@ -786,93 +856,95 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
- struct ixgbe_adapter *,
- struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
- struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+ struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+ struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
+void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
- struct ixgbe_tx_buffer *first,
- u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
- struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
- struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,35 +956,22 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
- return;
-
- __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
- /*
- * Update the last_rx_timestamp timer in order to enable watchdog check
- * for error case of latched timestamp on a dropped packet.
- */
- rx_ring->last_rx_timestamp = jiffies;
-}
-
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *tx_ring);
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index a26f3fee4f3..15609331ec1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -40,10 +41,10 @@
#define IXGBE_82598_RX_PB_SIZE 512
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
**/
static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
u16 pcie_devctl2;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
/* only take action if timeout value is defaulted to 0 */
if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
goto out;
@@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
- pci_read_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+ pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
- pci_write_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+ ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -138,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -154,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
- hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
-
/* set the completion timeout for interface */
if (ret_val == 0)
ixgbe_set_pcie_completion_timeout(hw);
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
int i;
bool link_up;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete)
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
{
u32 links_reg;
u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ &adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
msleep(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- MDIO_MMD_PMAPMD,
- &link_reg);
+ MDIO_MMD_PMAPMD,
+ &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ MDIO_MMD_PMAPMD,
+ &adapt_comp_reg);
}
} else {
if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
/* Enable Tx Atlas so packets can be transmitted again */
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- analog_val);
+ analog_val);
}
/* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
+ 0);
return 0;
}
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 atlas_ctl;
IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1237,14 +1242,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers.
- */
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
- int strategy)
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy)
{
u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
- return;
}
static struct ixgbe_mac_operations mac_ops_82598 = {
@@ -1315,7 +1318,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
- .mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 007a0083a63..bc7c924240a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -47,24 +48,26 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
u32 fwsm, manc, factps;
@@ -91,11 +94,11 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
* and MNG not enabled
*/
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- !hw->mng_fw_enabled) {
+ !ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ &ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
+ &ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
@@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u16 list_offset, data_offset, data_value;
- bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -130,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset = NULL;
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto setup_sfp_out;
/* PHY config will finish before releasing the semaphore */
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
@@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Need SW/FW semaphore around AUTOC writes if LESM on,
- * likewise reset_pipeline requires lock as it also writes
- * AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto setup_sfp_out;
-
- got_lock = true;
- }
-
/* Restart DSP and set SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
- IXGBE_AUTOC_LMS_10G_SERIAL));
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- ret_val = ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock) {
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- got_lock = false;
- }
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ false);
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +189,81 @@ setup_sfp_err:
return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
+/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return whether we locked for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599(). Note that locked can only be true in cases
+ * where this function doesn't return an error.
+ **/
+static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+ u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = false;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = true;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous prot_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ **/
+static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = 0;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ locked = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
+
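/* Editor's illustration (not part of the patch): a minimal sketch of the
 * read-modify-write contract behind the new prot_autoc_read/prot_autoc_write
 * callbacks. The helper name example_autoc_rmw() is hypothetical; only the
 * mac.ops hooks shown above are real.
 */
static s32 example_autoc_rmw(struct ixgbe_hw *hw, u32 set_bits)
{
	bool locked = false;
	u32 autoc;
	s32 ret;

	/* On 82599 this takes the SW/FW semaphore when LESM is enabled */
	ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
	if (ret)
		return ret;

	autoc |= set_bits;

	/* Writes AUTOC, resets the pipeline and drops the lock if it was taken */
	return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
}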
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -276,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -294,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
+ ixgbe_link_speed *speed,
bool *autoneg)
{
s32 status = 0;
@@ -456,12 +514,20 @@ out:
*
* Disables link, should be called during D3 power down sequence.
*
- */
+ **/
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
{
- u32 autoc2_reg;
+ u32 autoc2_reg, fwsm;
+ u16 ee_ctrl_2 = 0;
+
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
- if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+ /* Check to see if MNG FW could be enabled */
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+
+ if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
+ !hw->wol_enabled &&
+ ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -477,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
/* Disable tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
**/
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
if (hw->mac.autotry_restart) {
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -590,75 +664,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
- * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- * @hw: pointer to hardware structure
- * @speed: link speed to set
- *
- * We set the module speed differently for fixed fiber. For other
- * multi-speed devices we don't have an error value so here if we
- * detect an error we just log it and exit.
- */
-static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed)
-{
- s32 status;
- u8 rs, eeprom_data;
-
- switch (speed) {
- case IXGBE_LINK_SPEED_10GB_FULL:
- /* one bit mask same as setting on */
- rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
- break;
- case IXGBE_LINK_SPEED_1GB_FULL:
- rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
- break;
- default:
- hw_dbg(hw, "Invalid fixed module speed\n");
- return;
- }
-
- /* Set RS0 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
- goto out;
- }
-
- /* Set RS1 */
- status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- &eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
- goto out;
- }
-
- eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
- status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
- IXGBE_I2C_EEPROM_DEV_ADDR2,
- eeprom_data);
- if (status) {
- hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
- goto out;
- }
-out:
- return;
-}
-
-/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -667,8 +672,8 @@ out:
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
/* Set the module link speed */
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
- ixgbe_set_fiber_fixed_speed(hw,
- IXGBE_LINK_SPEED_1GB_FULL);
- break;
case ixgbe_media_type_fiber:
esdp_reg &= ~IXGBE_ESDP_SDP5;
esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -819,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed,
- autoneg_wait_to_complete);
+ highest_link_speed,
+ autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
out:
if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
- hw_dbg(hw, "Smartspeed has downgraded the link speed from "
- "the maximum advertised\n");
+ hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
return status;
}
@@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
+ bool autoneg = false;
s32 status = 0;
- u32 autoc, pma_pmd_1g, link_mode, start_autoc;
+ u32 pma_pmd_1g, link_mode, links_reg, i;
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 orig_autoc = 0;
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
- u32 links_reg;
- u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- bool got_lock = false;
- bool autoneg = false;
+
+ /* holds the value of AUTOC register at this current point in time */
+ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ /* holds the cached value of AUTOC register */
+ u32 orig_autoc = 0;
+ /* temporary variable used for comparison purposes */
+ u32 autoc = current_autoc;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
if (hw->mac.orig_link_settings_stored)
- autoc = hw->mac.orig_autoc;
+ orig_autoc = hw->mac.orig_autoc;
else
- autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ orig_autoc = autoc;
- orig_autoc = autoc;
- start_autoc = hw->mac.cached_autoc;
link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -1008,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
/* Switch from 1G SFI to 10G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
(pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1017,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
}
- if (autoc != start_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is on,
- * likewise reset_pipeline requires us to hold this lock as
- * it also writes to AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status != 0)
- goto out;
-
- got_lock = true;
- }
-
+ if (autoc != current_autoc) {
/* Restart link */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- hw->mac.cached_autoc = autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+ if (status)
+ goto out;
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -1067,9 +1051,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
- hw_dbg(hw, "Autoneg did not "
- "complete.\n");
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ hw_dbg(hw, "Autoneg did not complete.\n");
}
}
}
@@ -1091,14 +1074,14 @@ out:
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
- u32 ctrl, i, autoc2;
+ u32 ctrl, i, autoc, autoc2;
u32 curr_lms;
bool link_up = false;
@@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset(hw);
/* remember AUTOC from before we reset */
- if (hw->mac.cached_autoc)
- curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
- else
- curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
- IXGBE_AUTOC_LMS_MASK;
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
mac_reset_top:
/*
@@ -1205,7 +1184,7 @@ mac_reset_top:
* stored off yet. Otherwise restore the stored original
* values since the reset operation sets back to defaults.
*/
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Enable link if disabled in NVM */
@@ -1216,7 +1195,7 @@ mac_reset_top:
}
if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = hw->mac.cached_autoc;
+ hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
@@ -1227,41 +1206,25 @@ mac_reset_top:
* Likewise if we support WoL we don't want change the
* LMS state either.
*/
- if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
hw->wol_enabled)
hw->mac.orig_autoc =
(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
curr_lms;
- if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is
- * on, likewise reset_pipeline requires us to hold
- * this lock as it also writes to AUTOC.
- */
- bool got_lock = false;
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status)
- goto reset_hw_out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
- hw->mac.cached_autoc = hw->mac.orig_autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ false);
+ if (status)
+ goto reset_hw_out;
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_AUTOC2_UPPER_MASK);
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
}
}
@@ -1283,7 +1246,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1294,7 +1257,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -1308,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
/*
@@ -1321,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, "
- "aborting table re-initialization.\n");
+ hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1336,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* - write 0 to bit 8 of FDIRCMD register
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
- IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- ~IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
/*
* Clear FDIR Hash register to clear any leftover hashes
@@ -1356,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
/* Poll init-done after we write FDIRCTRL register */
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1405,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1490,7 +1453,7 @@ do { \
bucket_hash ^= hi_hash_dword >> n; \
else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
@@ -1566,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
{
u64 fdirhashcmd;
u32 fdircmd;
@@ -1592,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1616,7 +1579,7 @@ do { \
bucket_hash ^= lo_hash_dword >> n; \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
{
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 bucket_hash = 0;
+ u32 bucket_hash = 0, hi_dword = 0;
+ int i;
/* Apply masks to input data */
- input->dword_stream[0] &= input_mask->dword_stream[0];
- input->dword_stream[1] &= input_mask->dword_stream[1];
- input->dword_stream[2] &= input_mask->dword_stream[2];
- input->dword_stream[3] &= input_mask->dword_stream[3];
- input->dword_stream[4] &= input_mask->dword_stream[4];
- input->dword_stream[5] &= input_mask->dword_stream[5];
- input->dword_stream[6] &= input_mask->dword_stream[6];
- input->dword_stream[7] &= input_mask->dword_stream[7];
- input->dword_stream[8] &= input_mask->dword_stream[8];
- input->dword_stream[9] &= input_mask->dword_stream[9];
- input->dword_stream[10] &= input_mask->dword_stream[10];
+ for (i = 0; i <= 10; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
/* record the flow_vm_vlan bits as they are a key part to the hash */
flow_vm_vlan = ntohl(input->dword_stream[0]);
/* generate common hash dword */
- hi_hash_dword = ntohl(input->dword_stream[1] ^
- input->dword_stream[2] ^
- input->dword_stream[3] ^
- input->dword_stream[4] ^
- input->dword_stream[5] ^
- input->dword_stream[6] ^
- input->dword_stream[7] ^
- input->dword_stream[8] ^
- input->dword_stream[9] ^
- input->dword_stream[10]);
+ for (i = 1; i <= 10; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = ntohl(hi_dword);
/* low dword is word swapped version of common */
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
/* Process remaining 30 bit of the key */
- IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
- IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+ for (i = 1; i <= 15; i++)
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
/*
* Limit hash to 13 bits since max bucket count is 8K.
@@ -1716,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
u32 mask = ntohs(input_mask->formatted.dst_port);
+
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
mask |= ntohs(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1950,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 core_ctl;
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
- (reg << 8));
+ (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
@@ -2001,7 +1937,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = true;
- hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
if (ret_val == 0)
ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -2260,7 +2195,7 @@ fw_version_err:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2301,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 anlp1_reg = 0;
@@ -2380,11 +2315,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
- autoc_reg = hw->mac.cached_autoc;
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
@@ -2565,7 +2501,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.release_swfw_sync = &ixgbe_release_swfw_sync,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
- .mng_fw_enabled = &ixgbe_mng_enabled,
+ .prot_autoc_read = &prot_autoc_read_82599,
+ .prot_autoc_write = &prot_autoc_write_82599,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5c434b617b..4e5385a2a46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -40,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count);
+ u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
bool link_up;
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* if link is down, assume supported */
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
- bool got_lock = false;
+ bool locked = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
- case ixgbe_media_type_fiber_fixed:
- case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
+ /* some MACs need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+ if (ret_val)
+ goto out;
+
+ /* only backplane uses autoc so fall through */
+ case ixgbe_media_type_fiber:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
break;
case ixgbe_media_type_copper:
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- ixgbe_device_supports_autoneg_fc(hw)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -281,6 +271,7 @@ out:
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
/* Set the media type */
@@ -302,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw);
+ ret_val = ixgbe_setup_fc(hw);
+ if (!ret_val)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return 0;
+out:
+ return ret_val;
}
/**
@@ -491,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
* Reads the part number string from the EEPROM.
**/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
@@ -656,20 +650,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
**/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
- struct ixgbe_mac_info *mac = &hw->mac;
u16 link_status;
hw->bus.type = ixgbe_bus_type_pci_express;
/* Get the negotiated link width and speed from PCI config space */
- pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
- &link_status);
+ link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
hw->bus.width = ixgbe_convert_bus_width(link_status);
hw->bus.speed = ixgbe_convert_bus_speed(link_status);
- mac->ops.set_lan_id(hw);
+ hw->mac.ops.set_lan_id(hw);
return 0;
}
@@ -827,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
eeprom->address_bits = 16;
else
eeprom->address_bits = 8;
- hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
+ eeprom->type, eeprom->word_size, eeprom->address_bits);
}
return 0;
@@ -1208,7 +1198,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
*/
hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
- hw_dbg(hw, "Detected EEPROM page size = %d words.",
+ hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
hw->eeprom.word_page_size);
out:
return status;
@@ -1401,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
}
if (i == timeout) {
- hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
- "not granted.\n");
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
/*
* this release is particularly important because our attempts
* above to get the semaphore may have succeeded, and if there
@@ -1447,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
- hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
- "not granted.\n");
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -1496,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
+ IXGBE_EEPROM_OPCODE_BITS);
spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
break;
@@ -1545,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
* @count: number of bits to shift out
**/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count)
+ u16 count)
{
u32 eec;
u32 mask;
@@ -1749,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* caller does not need checksum_val, the value can be NULL.
**/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val)
+ u16 *checksum_val)
{
s32 status;
u16 checksum;
@@ -1822,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Puts an ethernet address into a receive address register.
**/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
u32 rar_low, rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2066,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
return 0;
@@ -2084,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
- hw->mac.mc_filter_type);
+ hw->mac.mc_filter_type);
return 0;
}
@@ -2119,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
u32 fcrtl, fcrth;
int i;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/* Negotiate the fc mode to use */
ixgbe_fc_autoneg(hw);
@@ -2194,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
} else {
@@ -2406,7 +2398,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
- case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2437,6 +2428,53 @@ out:
}
/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec. Never return less than
+ * 800 = 80 millisec.
+ **/
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+ s16 devctl2;
+ u32 pollcnt;
+
+ devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+ switch (devctl2) {
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ pollcnt = 1300; /* 130 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ pollcnt = 5200; /* 520 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ pollcnt = 20000; /* 2 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ pollcnt = 80000; /* 8 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ pollcnt = 34000; /* 34 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
+ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
+ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
+ default:
+ pollcnt = 800; /* 80 millisec minimum */
+ break;
+ }
+
+ /* add 10% to spec maximum */
+ return (pollcnt * 11) / 10;
+}
+
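/* Worked example (editor's note, not part of the patch): with the completion
 * timeout programmed into the 260-520 ms range, pollcnt starts at 5200
 * (520 ms in 100 usec units) and the function returns 5200 * 11 / 10 = 5720,
 * i.e. the caller will poll for roughly 572 ms before giving up.
 */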
+/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
*
@@ -2447,16 +2485,16 @@ out:
**/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
s32 status = 0;
- u32 i;
+ u32 i, poll;
u16 value;
/* Always set this bit to ensure any future transactions are blocked */
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ ixgbe_removed(hw->hw_addr))
goto out;
/* Poll for master request bit to clear */
@@ -2481,10 +2519,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
udelay(100);
- pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
- &value);
+ value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (ixgbe_removed(hw->hw_addr))
+ goto out;
if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -2564,6 +2604,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @reg_val: Value we read from AUTOC
+ * @locked: bool to indicate whether the SW/FW lock should be taken. Never
+ * true in the generic case.
+ *
+ * The default case requires no protection so just do the register read.
+ **/
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = false;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ **/
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return 0;
+}
+
+/**
* ixgbe_disable_rx_buff_generic - Stops the receive data path
* @hw: pointer to hardware structure
*
@@ -2590,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
/* For informational purposes only */
if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
return 0;
@@ -2641,6 +2709,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
+ bool locked = false;
/*
* Link must be up to auto-blink the LEDs;
@@ -2649,28 +2718,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- bool got_lock = false;
-
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+ goto out;
- got_lock = true;
- }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val)
+ goto out;
+
IXGBE_WRITE_FLUSH(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2690,33 +2750,21 @@ out:
**/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
- bool got_lock = false;
-
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
+ bool locked = false;
- got_lock = true;
- }
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+ if (ret_val)
+ goto out;
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+ if (ret_val)
+ goto out;
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2738,7 +2786,7 @@ out:
* get and set mac_addr routines.
**/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+ u16 *san_mac_offset)
{
s32 ret_val;
@@ -2784,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
hw->mac.ops.set_lan_id(hw);
/* apply the port offset to the address offset */
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
&san_mac_data);
@@ -2817,7 +2865,6 @@ san_mac_addr_clr:
**/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u16 msix_count = 1;
u16 max_msix_count;
u16 pcie_offset;
@@ -2836,7 +2883,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
return msix_count;
}
- pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
+ msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
+ if (ixgbe_removed(hw->hw_addr))
+ msix_count = 0;
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
@@ -2868,6 +2917,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+ if (ixgbe_removed(hw->hw_addr))
+ goto done;
+
if (!mpsar_lo && !mpsar_hi)
goto done;
@@ -3020,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
s32 regindex;
u32 bitindex;
@@ -3142,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* Ignore it. */
vfta_changed = false;
}
- }
- else
+ } else {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
}
if (vfta_changed)
@@ -3244,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d259dc76604..2ae5d4b8fc9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -38,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -60,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
+ u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
+ u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
+ u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -91,13 +92,17 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
+ u16 *wwpn_prefix);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
@@ -124,24 +129,57 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define IXGBE_FAILED_READ_REG 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
+
+static inline bool ixgbe_removed(void __iomem *addr)
+{
+ return unlikely(!addr);
+}
+
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (ixgbe_removed(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value))
#ifndef writeq
-#define writeq(val, addr) writel((u32) (val), addr); \
- writel((u32) (val >> 32), (addr + 4));
+#define writeq writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
#endif
-#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (ixgbe_removed(reg_addr))
+ return;
+ writeq(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) (\
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
+ ixgbe_write_reg((a), (reg) + ((offset) << 2), (value))
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) (\
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) \
+ ixgbe_read_reg((a), (reg) + ((offset) << 2))
-#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS)
#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
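/* Editor's illustration (not part of the patch): a sketch of how the new
 * accessors let callers cope with a surprise-removed adapter. It assumes,
 * as hinted by IXGBE_FAILED_READ_REG above, that ixgbe_read_reg() returns
 * all ones once hw->hw_addr has been cleared; example_link_is_up() is a
 * hypothetical helper, IXGBE_LINKS/IXGBE_LINKS_UP are existing defines.
 */
static bool example_link_is_up(struct ixgbe_hw *hw)
{
	u32 links = IXGBE_READ_REG(hw, IXGBE_LINKS);

	/* An all-ones read is what a removed PCIe device returns */
	if (links == IXGBE_FAILED_READ_REG && ixgbe_removed(hw->hw_addr))
		return false;

	return !!(links & IXGBE_LINKS_UP);
}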
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e000131..a689ee0d4be 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
for (i = 0; i < MAX_USER_PRIORITY; i++)
map[i] = IXGBE_RTRUP2TC_UP_MASK &
(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
- return;
}
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37a7cb..d3ba63f9ad3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
continue;
}
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 05e23b80b5e..3b932fe64ab 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -241,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
max_tc = prio_tc[i];
}
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
@@ -256,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d71d9ce3e39..90c370230e2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -30,17 +31,17 @@
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
- * 1 WSP - Weighted Strict Priority
- */
+ * 1 WSP - Weighted Strict Priority
+ */
#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
- * 1 WRR - Weighted Round Robin
- */
+ * 1 WRR - Weighted Round Robin
+ */
#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
- * clear!
- */
+ * clear!
+ */
#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
/* Receive UP2TC mapping */
@@ -55,11 +56,11 @@
#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
- * buffers enable
- */
+ * buffers enable
+ */
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
- * (RSS) enable
- */
+ * (RSS) enable
+ */
/* RTRPCS Bit Masks */
#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
@@ -80,8 +81,8 @@
/* RTTPCS Bit Masks */
#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
- * 1 SP - Strict Priority
- */
+ * 1 SP - Strict Priority
+ */
#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RTTPCS_ARBD_SHIFT 22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1ef27..5172b6b12c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
- u8 setting)
+ u8 setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
}
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
- u8 *setting)
+ u8 *setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index c5933f6dcee..5e2c1e35e51 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -252,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
- if (adapter->ixgbe_dbg_adapter)
- debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+ debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
adapter->ixgbe_dbg_adapter = NULL;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index e8649abf97c..a452730a327 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -140,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
- IXGBE_PB_STATS_LEN + \
- IXGBE_QUEUE_STATS_LEN)
+ IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
@@ -151,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -160,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
- /* SFP type is needed for get_link_capabilities */
- if (hw->phy.media_type & (ixgbe_media_type_fiber |
- ixgbe_media_type_fiber_qsfp)) {
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- hw->phy.ops.identify_sfp(hw);
- }
-
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -302,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
}
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -367,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -389,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -442,14 +436,14 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
static int ixgbe_get_regs_len(struct net_device *netdev)
{
-#define IXGBE_REGS_LEN 1129
+#define IXGBE_REGS_LEN 1139
return IXGBE_REGS_LEN * sizeof(u32);
}
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ struct ethtool_regs *regs, void *p)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -602,22 +596,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
/* DCB */
- regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
- regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
- for (i = 0; i < 8; i++)
- regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
- for (i = 0; i < 8; i++)
- regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
- for (i = 0; i < 8; i++)
- regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
- for (i = 0; i < 8; i++)
- regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */
+ regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+ regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+ for (i = 0; i < 8; i++)
+ regs_buff[833 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[841 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[849 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+ for (i = 0; i < 8; i++)
+ regs_buff[857 + i] =
+ IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < 8; i++)
- regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+ regs_buff[865 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
for (i = 0; i < 8; i++)
- regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+ regs_buff[873 + i] =
+ IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
/* Statistics */
regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +782,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* 82599 X540 specific registers */
regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+ /* 82599 X540 specific DCB registers */
+ regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+ for (i = 0; i < 4; i++)
+ regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+ regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+ /* same as RTTQCNRM */
+ regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+ /* same as RTTQCNRR */
+
+ /* X540 specific DCB registers */
+ regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+ regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
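The ten registers added above land in regs_buff[1129] through regs_buff[1138], which is why IXGBE_REGS_LEN grows from 1129 to 1139; ethtool sizes the user buffer straight from it:

    static int ixgbe_get_regs_len(struct net_device *netdev)
    {
    /* highest index written by ixgbe_get_regs() is now 1138 */
    #define IXGBE_REGS_LEN 1139
        return IXGBE_REGS_LEN * sizeof(u32);    /* 1139 * 4 = 4556 bytes */
    }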
@@ -766,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
}
static int ixgbe_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -872,7 +911,7 @@ err:
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 nvm_track_id;
@@ -894,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -907,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
@@ -1036,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct rtnl_link_stats64 temp;
@@ -1064,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
@@ -1072,7 +1111,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1082,12 +1121,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1139,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = 0;
data[i+1] = 0;
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = 0;
data[i+1] = 0;
data[i+2] = 0;
@@ -1110,12 +1149,12 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
data[i] = ring->stats.yields;
data[i+1] = ring->stats.misses;
data[i+2] = ring->stats.cleaned;
@@ -1134,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
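The _irq variants of the u64_stats helpers replace the _bh ones so the counters can also be updated safely from hard-irq context; the consumer-side retry pattern itself does not change:

    /* reader: retry until the writer's sequence count is stable */
    unsigned int start;
    u64 packets, bytes;

    do {
        start = u64_stats_fetch_begin_irq(&ring->syncp);
        packets = ring->stats.packets;
        bytes = ring->stats.bytes;
    } while (u64_stats_fetch_retry_irq(&ring->syncp, start));

On 64-bit kernels both helpers compile away and the reads are plain loads.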
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+ u8 *data)
{
char *p = (char *)data;
int i;
@@ -1157,28 +1196,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_misses", i);
+ sprintf(p, "tx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_ll_cleaned", i);
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_misses", i);
+ sprintf(p, "rx_queue_%u_bp_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_ll_cleaned", i);
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
}
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
sprintf(p, "tx_pb_%u_pxon", i);
@@ -1202,6 +1241,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
struct ixgbe_hw *hw = &adapter->hw;
bool link_up;
u32 link_speed = 0;
+
+ if (ixgbe_removed(hw->hw_addr)) {
+ *data = 1;
+ return 1;
+ }
*data = 0;
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1297,61 +1341,60 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
static const u32 test_pattern[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
- before = readl(adapter->hw.hw_addr + reg);
- writel((test_pattern[pat] & write),
- (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
- e_err(drv, "pattern test reg %04X failed: got "
- "0x%08X expected 0x%08X\n",
+ e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
- writel(before, adapter->hw.hw_addr + reg);
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, adapter->hw.hw_addr + reg);
+ ixgbe_write_reg(&adapter->hw, reg, before);
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
u32 mask, u32 write)
{
u32 val, before;
- before = readl(adapter->hw.hw_addr + reg);
- writel((write & mask), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ before = ixgbe_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err(drv, "set/check reg %04X test failed: got 0x%08X "
- "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- writel(before, (adapter->hw.hw_addr + reg));
- return 1;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
}
- writel(before, (adapter->hw.hw_addr + reg));
- return 0;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
-#define REG_PATTERN_TEST(reg, mask, write) \
- do { \
- if (reg_pattern_test(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
-
-#define REG_SET_AND_CHECK(reg, mask, write) \
- do { \
- if (reg_set_and_check(adapter, data, reg, mask, write)) \
- return 1; \
- } while (0) \
-
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
const struct ixgbe_reg_test *test;
u32 value, before, after;
u32 i, toggle;
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(drv, "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
toggle = 0x7FFFF3FF;
@@ -1374,18 +1417,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
* tests. Some bits are read-only, some toggle, and some
* are writeable on newer MACs.
*/
- before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
- value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
- after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
+ before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
+ value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
+ after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
- e_err(drv, "failed STATUS register test got: 0x%08X "
- "expected: 0x%08X\n", after, value);
+ e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
/* restore previous status */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
+ ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);
/*
* Perform the remainder of the register test, looping through
@@ -1393,38 +1436,47 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ (test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
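The whole walk is driven by a table of struct ixgbe_reg_test entries terminated by a zero register offset (the `while (test->reg)` condition). For orientation, an entry has this shape (the two rows here are illustrative, not a complete table):

    static const struct ixgbe_reg_test reg_test_82599[] = {
        /* reg, array_len, test_type, mask, write */
        { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
        { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
        { 0, 0, 0, 0, 0 }    /* terminator: reg == 0 ends the walk */
    };

Converting the REG_PATTERN_TEST/REG_SET_AND_CHECK macros into bool helpers also removes a `return 1;` that used to be hidden inside the macro bodies; the failure exit is now visible at the call site.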
@@ -1473,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
return -1;
}
} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
shared_int = false;
} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1503,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1527,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
- if (!(adapter->test_icr &mask)) {
+ if (!(adapter->test_icr & mask)) {
*data = 4;
break;
}
@@ -1542,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1904,11 +1956,21 @@ out:
}
static void ixgbe_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ e_err(hw, "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ data[4] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
struct ixgbe_hw *hw = &adapter->hw;
@@ -1917,14 +1979,12 @@ static void ixgbe_diag_test(struct net_device *netdev,
int i;
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].clear_to_send) {
- netdev_warn(netdev, "%s",
- "offline diagnostic is not "
- "supported when VFs are "
- "present\n");
+ netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
data[0] = 1;
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IXGBE_TESTING,
&adapter->state);
@@ -1966,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
* loopback diagnostic. */
if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED)) {
- e_info(hw, "Skip MAC loopback diagnostic in VT "
- "mode\n");
+ e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
data[3] = 0;
goto skip_loopback;
}
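ixgbe_gstrings_test holds five results (register, EEPROM, interrupt, loopback and link), so every early-exit path has to fill data[0] through data[4]; the VF path above was missing data[4]. A loop makes the invariant harder to break; a sketch, not the patch's code:

    /* mark all IXGBE_TEST_LEN (5) self-test results as failed */
    for (i = 0; i < IXGBE_TEST_LEN; i++)
        data[i] = 1;
    eth_test->flags |= ETH_TEST_FL_FAILED;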
@@ -2007,7 +2066,7 @@ skip_ol_tests:
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_hw *hw = &adapter->hw;
int retval = 0;
@@ -2023,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
}
static void ixgbe_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2110,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
}
static int ixgbe_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2151,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- e_info(probe, "rx-usecs value high enough "
- "to re-enable RSC\n");
+ e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
return true;
}
/* if interrupt rate is too high then disable RSC */
@@ -2165,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
@@ -2212,13 +2270,13 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
#if IS_ENABLED(CONFIG_BQL)
/* detect ITR changes that require update of TXDCTL.WTHRESH */
- if ((adapter->tx_itr_setting > 1) &&
+ if ((adapter->tx_itr_setting != 1) &&
(adapter->tx_itr_setting < IXGBE_100K_ITR)) {
if ((tx_itr_prev == 1) ||
- (tx_itr_prev > IXGBE_100K_ITR))
+ (tx_itr_prev >= IXGBE_100K_ITR))
need_reset = true;
} else {
- if ((tx_itr_prev > 1) &&
+ if ((tx_itr_prev != 1) &&
(tx_itr_prev < IXGBE_100K_ITR))
need_reset = true;
}
@@ -2350,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2362,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2716,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
- e_warn(drv, "enabling UDP RSS: fragmented packets"
- " may arrive out of order to the stack above\n");
+ e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags2 = flags2;
@@ -3028,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index f58db453a97..25a3dfef33e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
/* return 0 to bypass going to ULD for DDPed data */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
rc = 0;
break;
/* unmap the sg list when FCPRSP is received */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = ddp_err;
@@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
ddp->sgc = 0;
/* fall through */
/* if DDP length is present pass it through to ULD */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
if (ddp->len)
rc = ddp->len;
break;
/* no match will return as an error */
- case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
default:
break;
}
@@ -585,7 +586,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
struct dma_pool *pool;
char pool_name[32];
- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
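The __constant_cpu_to_le32() spellings are redundant: cpu_to_le32() already folds to an integer constant expression when its argument is constant, so it is legal even in case labels. A sketch, where status is a hypothetical __le32 local:

    __le32 status = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT);

    switch (status) {
    case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):    /* constant-folded */
        rc = 0;    /* DDPed data: bypass the ULD */
        break;
    default:
        break;
    }

Comparing in little-endian form avoids a byte swap per packet on big-endian hosts.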
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 3a02759b5e9..0772b7730fc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -80,9 +81,7 @@ struct ixgbe_fcoe {
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
u8 up;
-#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 90b4e1089ec..2d9451e3968 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -498,6 +499,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
u16 fcoe_i = 0;
#endif
+ bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
/* only proceed if SR-IOV is enabled */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
@@ -510,7 +512,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4)) {
+ if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
@@ -697,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
- int err, vector_threshold;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -711,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold) {
+ if (vectors < 0) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
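pci_enable_msix_range() replaces the open-coded retry loop: it asks for up to `vectors` entries, accepts any allocation of at least vector_threshold, and returns the count actually granted or a negative errno. Usage matches the code above:

    /* returns a value in [minvec, maxvec] on success, -errno on failure */
    vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    vector_threshold, vectors);
    if (vectors < 0) {
        /* not even vector_threshold vectors: fall back to MSI/legacy */
    }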
@@ -852,7 +846,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- ring->queue_index = txr_idx;
+ if (adapter->num_rx_pools > 1)
+ ring->queue_index =
+ txr_idx % adapter->num_rx_queues_per_pool;
+ else
+ ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
@@ -895,7 +893,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- ring->queue_index = rxr_idx;
+ if (adapter->num_rx_pools > 1)
+ ring->queue_index =
+ rxr_idx % adapter->num_rx_queues_per_pool;
+ else
+ ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
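When macvlan offload carves the device into pools, each pool's rings must be numbered relative to the pool, since the upper netdev only knows about its own queue range. The mapping is a simple modulo:

    /* e.g. with 4 queues per pool, global ring 9 becomes queue 1
     * (9 % 4) of pool 2 (9 / 4) */
    if (adapter->num_rx_pools > 1)
        ring->queue_index = rxr_idx % adapter->num_rx_queues_per_pool;
    else
        ring->queue_index = rxr_idx;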
@@ -1111,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
err = pci_enable_msi(adapter->pdev);
if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
+ "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+ err);
return;
}
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd5ef5..f5aa3311ea2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -44,6 +45,7 @@
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -63,10 +65,10 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.15.1-k"
+#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2013 Intel Corporation.";
+ "Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -132,7 +134,7 @@ static struct notifier_block dca_notifier = {
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
- "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
+ "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
static unsigned int allow_unsupported_sfp;
@@ -150,10 +152,11 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
- int pos = 0;
struct pci_dev *parent_dev;
struct pci_bus *parent_bus;
@@ -165,11 +168,13 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
if (!parent_dev)
return -1;
- pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP);
- if (!pos)
+ if (!pci_is_pcie(parent_dev))
return -1;
- pci_read_config_word(parent_dev, pos + reg, value);
+ pcie_capability_read_word(parent_dev, reg, value);
+ if (*value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
+ return -1;
return 0;
}
@@ -247,7 +252,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
max_gts = 4 * width;
break;
case PCIE_SPEED_8_0GT:
- /* 128b/130b encoding only reduces throughput by 1% */
+ /* 128b/130b encoding reduces throughput by less than 2% */
max_gts = 8 * width;
break;
default:
@@ -265,7 +270,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
width,
(speed == PCIE_SPEED_2_5GT ? "20%" :
speed == PCIE_SPEED_5_0GT ? "20%" :
- speed == PCIE_SPEED_8_0GT ? "N/a" :
+ speed == PCIE_SPEED_8_0GT ? "<2%" :
"Unknown"));
if (max_gts < expected_gts) {
@@ -279,16 +284,125 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_bit(__IXGBE_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
schedule_work(&adapter->service_task);
}
+static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ e_dev_err("Adapter removed\n");
+ if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
+ ixgbe_service_event_schedule(adapter);
+}
+
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+ /* If the register just read was the status register and it came
+ * back as IXGBE_FAILED_READ_REG, re-reading it would be pointless;
+ * skipping that read saves an MMIO access and also blocks any
+ * potential recursion.
+ */
+ if (reg == IXGBE_STATUS) {
+ ixgbe_remove_adapter(hw);
+ return;
+ }
+ value = ixgbe_read_reg(hw, IXGBE_STATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbe_remove_adapter(hw);
+}
+
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Return: value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. It detects device
+ * removal by confirming that a read returning all ones is genuine:
+ * the status register is read as well and must also be all ones. If
+ * a removal was previously detected, the function avoids touching
+ * the hardware and returns IXGBE_FAILED_READ_REG (all ones).
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
+{
+ u16 value;
+
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD) {
+ ixgbe_remove_adapter(hw);
+ return true;
+ }
+ return false;
+}
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ pci_read_config_word(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u32 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ pci_read_config_dword(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_DWORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+ pci_write_config_word(adapter->pdev, reg, value);
+}
+
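With the sentinel in place, hot paths no longer have to trust a raw register read; they can test either the cached mapping or the returned value. A hypothetical caller, for illustration only:

    u32 links = IXGBE_READ_REG(hw, IXGBE_LINKS);

    if (links == IXGBE_FAILED_READ_REG)    /* all ones: device gone */
        return;    /* service task handles the teardown */

The STATUS register works as the arbiter in ixgbe_check_remove() because it never legitimately reads as all ones on a live device.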
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
/* flush memory to make sure state is correct before next watchdog */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
@@ -872,11 +986,18 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
- struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_hw *hw;
+ u32 head, tail;
+
+ if (ring->l2_accel_priv)
+ adapter = ring->l2_accel_priv->real_adapter;
+ else
+ adapter = netdev_priv(ring->netdev);
- u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
- u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+ hw = &adapter->hw;
+ head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+ tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
if (head != tail)
return (head < tail) ?
@@ -1227,7 +1348,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
#ifdef IXGBE_FCOE
@@ -1308,7 +1431,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
* such as IA-64).
*/
wmb();
- writel(val, rx_ring->tail);
+ ixgbe_write_tail(rx_ring, val);
}
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
@@ -1443,7 +1566,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
hdr.network += ETH_HLEN;
/* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if (protocol == htons(ETH_P_8021Q)) {
if ((hdr.network - data) > (max_len - VLAN_HLEN))
return max_len;
@@ -1452,7 +1575,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
}
/* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
return max_len;
@@ -1466,7 +1589,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
/* record next protocol if header is present */
if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ } else if (protocol == htons(ETH_P_IPV6)) {
if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
return max_len;
@@ -1474,7 +1597,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
nexthdr = hdr.ipv6->nexthdr;
hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
- } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+ } else if (protocol == htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
return max_len;
hlen = FCOE_HEADER_LEN;
@@ -1567,7 +1690,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
- ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+ if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+ ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1585,7 +1709,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- if (ixgbe_qv_ll_polling(q_vector))
+ if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb);
else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
@@ -1989,7 +2113,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
- do {
+ while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
@@ -2064,7 +2188,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* update budget accounting */
total_rx_packets++;
- } while (likely(total_rx_packets < budget));
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
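Turning the do/while into a while means the budget is checked before the first descriptor, so a budget of 0 now cleans nothing at all instead of always processing one packet, presumably to honor callers (such as netpoll) that pass a zero budget purely to trigger Tx cleanup. Shape of the loop:

    while (likely(total_rx_packets < budget)) {
        union ixgbe_adv_rx_desc *rx_desc;
        /* ... fetch descriptor, build and hand off the skb ... */
        total_rx_packets++;
    }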
@@ -2097,7 +2221,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
ixgbe_for_each_ring(ring, q_vector->rx) {
found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
if (found)
ring->stats.cleaned += found;
else
@@ -2593,9 +2717,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC Err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int reinit_count = 0;
@@ -2809,9 +2936,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
/* Fall through */
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
@@ -2963,7 +3093,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
/*
* set WTHRESH to encourage burst writeback, it should not be set
@@ -3005,7 +3135,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
struct ixgbe_q_vector *q_vector = ring->q_vector;
if (q_vector)
- netif_set_xps_queue(adapter->netdev,
+ netif_set_xps_queue(ring->netdev,
&q_vector->affinity_mask,
ring->queue_index);
}
@@ -3302,6 +3432,8 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3326,6 +3458,8 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
u32 rxdctl;
u8 reg_idx = ring->reg_idx;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -3366,7 +3500,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
+ ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
@@ -3395,7 +3529,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int rss_i = adapter->ring_feature[RING_F_RSS].indices;
- int p;
+ u16 pool;
/* PSRTYPE must be initialized in non 82598 adapters */
u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -3412,9 +3546,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
else if (rss_i > 1)
psrtype |= 1 << 29;
- for (p = 0; p < adapter->num_rx_pools; p++)
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
- psrtype);
+ for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
@@ -3636,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
* ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
* @adapter: driver data
*/
@@ -3683,7 +3787,11 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring->l2_accel_priv)
+ continue;
+ j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3713,7 +3821,11 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i]->reg_idx;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+ if (ring->l2_accel_priv)
+ continue;
+ j = ring->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -3735,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ else
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+ ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+ return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+/* this function overwrites the first RAR entry with the default filter */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].queue = VMDQ_P(0);
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
+
+/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
@@ -3743,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV mode significantly less RAR entries are available */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
- /* return error if we do not support writing to RAR table */
- if (!hw->mac.ops.set_rar)
- return -ENOMEM;
-
netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- VMDQ_P(0), IXGBE_RAH_AV);
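+ /* del first so repeated rx_mode syncs do not duplicate the entry */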
+ ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--)
- hw->mac.ops.clear_rar(hw, rar_entries);
-
return count;
}
@@ -3793,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
-
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3807,34 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
/* Only disable hardware filter vlans in promiscuous mode
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
IXGBE_FLAG_SRIOV_ENABLED)))
- ixgbe_vlan_filter_disable(adapter);
- else
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
- } else {
- /*
- * Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscuous mode so
- * that we can at least receive multicast traffic
- */
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
}
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
}
@@ -3843,14 +4082,23 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev);
+ count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
}
- if (adapter->num_vfs)
- ixgbe_restore_vf_multicasts(adapter);
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3871,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3893,15 +4142,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- local_bh_disable(); /* for ixgbe_qv_lock_napi() */
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
napi_disable(&adapter->q_vector[q_idx]->napi);
- while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+ while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
pr_info("QV %d locked\n", q_idx);
- mdelay(1);
+ usleep_range(1000, 20000);
}
}
- local_bh_enable();
}
#ifdef CONFIG_IXGBE_DCB
@@ -3989,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4031,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
*/
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
@@ -4041,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
/* Calculate max LAN frame size */
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4067,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
if (!num_tc)
num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water > hw->fc.high_water[i])
- hw->fc.low_water = 0;
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
}
+
+ for (; i < MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4118,6 +4375,218 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
spin_unlock(&adapter->fdir_perfect_lock);
}
+static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
+ struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ /* No unicast promiscuous support for VMDQ devices. */
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+ vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
+
+ /* clear the affected bit */
+ vmolr &= ~IXGBE_VMOLR_MPE;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ hw->mac.ops.update_mc_addr_list(hw, dev);
+ }
+ ixgbe_write_uc_addr_list(adapter->netdev, pool);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+}
+
+static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
+{
+ struct ixgbe_adapter *adapter = vadapter->real_adapter;
+ int rss_i = adapter->num_rx_queues_per_pool;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u16 pool = vadapter->pool;
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_L2HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ if (rss_i > 3)
+ psrtype |= 2 << 29;
+ else if (rss_i > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
+}
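The bare shifts above set the pool's RSS queue-count field. Going by the identical pattern used elsewhere in the driver, bits 31:29 of PSRTYPE appear to hold log2 of the RSS queue count per pool (an inference, not stated in this patch):

	/* assumed encoding: PSRTYPE[31:29] = log2(RSS queues per pool) */
	psrtype |= 2 << 29;	/* rss_i above 3: four queues */
	psrtype |= 1 << 29;	/* rss_i of 2 or 3: two queues */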
+
+/**
+ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ unsigned long size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbe_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+ if (IXGBE_CB(skb)->page_released) {
+ dma_unmap_page(dev,
+ IXGBE_CB(skb)->dma,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ }
+ dev_kfree_skb(skb);
+ }
+ rx_buffer->skb = NULL;
+ if (rx_buffer->dma)
+ dma_unmap_page(dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+ if (rx_buffer->page)
+ __free_pages(rx_buffer->page,
+ ixgbe_rx_pg_order(rx_ring));
+ rx_buffer->page = NULL;
+ }
+
+ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
+ struct ixgbe_ring *rx_ring)
+{
+ struct ixgbe_adapter *adapter = vadapter->real_adapter;
+ int index = rx_ring->queue_index + vadapter->rx_base_queue;
+
+ /* shutdown specific queue receive and wait for dma to settle */
+ ixgbe_disable_rx_queue(adapter, rx_ring);
+ usleep_range(10000, 20000);
+ ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_clean_rx_ring(rx_ring);
+ rx_ring->l2_accel_priv = NULL;
+}
+
+static int ixgbe_fwd_ring_down(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
+{
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int txbase = accel->tx_base_queue;
+ int i;
+
+ netif_tx_stop_all_queues(vdev);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+ adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
+ adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
+ }
+
+ return 0;
+}
+
+static int ixgbe_fwd_ring_up(struct net_device *vdev,
+ struct ixgbe_fwd_adapter *accel)
+{
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase, txbase, queues;
+ int i, baseq, err = 0;
+
+ if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+ return 0;
+
+ baseq = accel->pool * adapter->num_rx_queues_per_pool;
+ netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ accel->pool, adapter->num_rx_pools,
+ baseq, baseq + adapter->num_rx_queues_per_pool,
+ adapter->fwd_bitmask);
+
+ accel->netdev = vdev;
+ accel->rx_base_queue = rxbase = baseq;
+ accel->tx_base_queue = txbase = baseq;
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->rx_ring[rxbase + i]->netdev = vdev;
+ adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
+ ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
+ }
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ adapter->tx_ring[txbase + i]->netdev = vdev;
+ adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
+ }
+
+ queues = min_t(unsigned int,
+ adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
+ err = netif_set_real_num_tx_queues(vdev, queues);
+ if (err)
+ goto fwd_queue_err;
+
+ err = netif_set_real_num_rx_queues(vdev, queues);
+ if (err)
+ goto fwd_queue_err;
+
+ if (is_valid_ether_addr(vdev->dev_addr))
+ ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+
+ ixgbe_fwd_psrtype(accel);
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return err;
+fwd_queue_err:
+ ixgbe_fwd_ring_down(vdev, accel);
+ return err;
+}
+
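Once ixgbe_fwd_ring_up() has pointed the pool's rings at the macvlan netdev, transmit selection only needs the base-queue offset; the ixgbe_select_queue() hunk further down completes the picture:

	if (fwd_adapter)
		return skb->queue_mapping + fwd_adapter->tx_base_queue;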
+static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (dfwd->fwd_priv) {
+ err = ixgbe_fwd_ring_up(upper, vadapter);
+ if (err)
+ continue;
+ }
+ }
+ }
+}
+
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -4169,6 +4638,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
+ ixgbe_configure_dfwd(adapter);
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -4186,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_qsfp_active_unknown:
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
+ /* ixgbe_phy_none is set when no SFP module is present */
+ case ixgbe_phy_none:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4337,6 +4809,7 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
+ smp_mb__before_atomic();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
@@ -4362,9 +4835,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
e_crit(drv, "Fan has stopped, replace the adapter\n");
}
- /* enable transmits */
- netif_tx_start_all_queues(adapter->netdev);
-
/* bring the link up in the watchdog; this could race with our first
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -4409,8 +4879,12 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
int err;
+ u8 old_addr[ETH_ALEN];
+ if (ixgbe_removed(hw->hw_addr))
+ return;
/* lock SFP init bit to prevent race conditions with the watchdog */
while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
usleep_range(1000, 2000);
@@ -4443,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ ixgbe_flush_sw_mac_table(adapter);
+ ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -4456,59 +4931,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
- **/
-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- unsigned long size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buffer_info)
- return;
-
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ixgbe_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_buffer->skb) {
- struct sk_buff *skb = rx_buffer->skb;
- if (IXGBE_CB(skb)->page_released) {
- dma_unmap_page(dev,
- IXGBE_CB(skb)->dma,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- }
- dev_kfree_skb(skb);
- }
- rx_buffer->skb = NULL;
- if (rx_buffer->dma)
- dma_unmap_page(dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- rx_buffer->dma = 0;
- if (rx_buffer->page)
- __free_pages(rx_buffer->page,
- ixgbe_rx_pg_order(rx_ring));
- rx_buffer->page = NULL;
- }
-
- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-}
-
-/**
* ixgbe_clean_tx_ring - Free Tx Buffers
* @tx_ring: ring to be cleaned
**/
@@ -4585,11 +5007,14 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
u32 rxctrl;
int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBE_DOWN, &adapter->state);
+ if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
+ return; /* do nothing if already down */
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -4608,6 +5033,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ /* disable any upper devices */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv) {
+ netif_tx_stop_all_queues(upper);
+ netif_carrier_off(upper);
+ netif_tx_disable(upper);
+ }
+ }
+ }
+
ixgbe_irq_disable(adapter);
ixgbe_napi_disable_all(adapter);
@@ -4728,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -4816,11 +5258,20 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
+ if (max_vfs > 0)
+ e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
+
/* assign number of SR-IOV VFs */
- if (hw->mac.type != ixgbe_mac_82598EB)
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
+ adapter->num_vfs = 0;
+ e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
+ } else {
+ adapter->num_vfs = max_vfs;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
-#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -4838,6 +5289,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
return -EIO;
}
+ /* PF holds first pool slot */
+ set_bit(0, &adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4867,6 +5320,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
if (!tx_ring->tx_buffer_info)
goto err;
+ u64_stats_init(&tx_ring->syncp);
+
/* round up to nearest 4K */
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -4949,6 +5404,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -5143,7 +5600,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int err;
+ int err, queues;
/* disallow open during test */
if (test_bit(__IXGBE_TESTING, &adapter->state))
@@ -5168,16 +5625,21 @@ static int ixgbe_open(struct net_device *netdev)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev,
- adapter->num_rx_pools > 1 ? 1 :
- adapter->num_tx_queues);
+ if (adapter->num_rx_pools > 1)
+ queues = adapter->num_rx_queues_per_pool;
+ else
+ queues = adapter->num_tx_queues;
+
+ err = netif_set_real_num_tx_queues(netdev, queues);
if (err)
goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev,
- adapter->num_rx_pools > 1 ? 1 :
- adapter->num_rx_queues);
+ if (adapter->num_rx_pools > 1 &&
+ adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
+ queues = IXGBE_MAX_L2A_QUEUES;
+ else
+ queues = adapter->num_rx_queues;
+ err = netif_set_real_num_rx_queues(netdev, queues);
if (err)
goto err_set_queues;
@@ -5199,6 +5661,17 @@ err_setup_tx:
return err;
}
+static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
+{
+ ixgbe_ptp_suspend(adapter);
+
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+}
+
/**
* ixgbe_close - Disables a network interface
* @netdev: network interface device structure
@@ -5216,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
-
ixgbe_release_hw_control(adapter);
return 0;
@@ -5236,6 +5705,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
struct net_device *netdev = adapter->netdev;
u32 err;
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
@@ -5249,6 +5719,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
e_dev_err("Cannot enable PCI device from suspend\n");
return err;
}
+ smp_mb__before_atomic();
+ clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
pci_wake_from_d3(pdev, false);
@@ -5287,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
rtnl_lock();
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_close_suspend(adapter);
rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
@@ -5346,7 +5814,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
ixgbe_release_hw_control(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return 0;
}
@@ -5623,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->state));
+ &(adapter->tx_ring[i]->state));
/* re-enable flow director interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
} else {
@@ -5647,8 +6116,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
u64 eics = 0;
int i;
- /* If we're down or resetting, just bail */
+ /* If we're down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -5749,6 +6219,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *upper;
+ struct list_head *iter;
u32 link_speed = adapter->link_speed;
bool flow_rx, flow_tx;
@@ -5800,6 +6272,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ /* enable transmits */
+ netif_tx_wake_all_queues(adapter->netdev);
+
+ /* enable any upper devices */
+ rtnl_lock();
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *vlan = netdev_priv(upper);
+
+ if (vlan->fwd_priv)
+ netif_tx_wake_all_queues(upper);
+ }
+ }
+ rtnl_unlock();
+
/* update the default user priority for VFs */
ixgbe_update_default_up(adapter);
@@ -5895,8 +6382,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
- /* if interface is down do nothing */
+ /* if interface is down, removing or resetting, do nothing */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6114,8 +6602,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
- /* If we're already down or resetting, just bail */
+ /* If we're already down, removing or resetting, just bail */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_REMOVING, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return;
@@ -6123,7 +6612,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
netdev_err(adapter->netdev, "Reset adapter\n");
adapter->tx_timeout_count++;
+ rtnl_lock();
ixgbe_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -6135,6 +6626,15 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
+ if (ixgbe_removed(adapter->hw.hw_addr)) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbe_down(adapter);
+ rtnl_unlock();
+ }
+ ixgbe_service_event_complete(adapter);
+ return;
+ }
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -6158,6 +6658,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
@@ -6165,16 +6666,14 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -6234,12 +6733,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -6466,7 +6965,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_ring->next_to_use = i;
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ ixgbe_write_tail(tx_ring, i);
return;
dma_error:
@@ -6514,9 +7013,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.network = skb_network_header(first->skb);
/* Currently only IPv4/IPv6 with TCP is supported */
- if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+ if ((first->protocol != htons(ETH_P_IPV6) ||
hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (first->protocol != __constant_htons(ETH_P_IP) ||
+ (first->protocol != htons(ETH_P_IP) ||
hdr.ipv4->protocol != IPPROTO_TCP))
return;
@@ -6549,12 +7048,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
* and write the value to source port portion of compressed dword
*/
if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
- common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+ common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
else
common.port.src ^= th->dest ^ first->protocol;
common.port.dst ^= th->source;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
} else {
@@ -6600,26 +7099,34 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
-#ifdef IXGBE_FCOE
-static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
{
+ struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
+#ifdef IXGBE_FCOE
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
int txq;
+#endif
+
+ if (fwd_adapter)
+ return skb->queue_mapping + fwd_adapter->tx_base_queue;
+
+#ifdef IXGBE_FCOE
/*
* only execute the code below if protocol is FCoE
* or FIP and we have FCoE enabled on the adapter
*/
switch (vlan_get_protocol(skb)) {
- case __constant_htons(ETH_P_FCOE):
- case __constant_htons(ETH_P_FIP):
+ case htons(ETH_P_FCOE):
+ case htons(ETH_P_FIP):
adapter = netdev_priv(dev);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -6631,9 +7138,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
txq -= f->indices;
return txq + f->offset;
+#else
+ return fallback(dev, skb);
+#endif
}
-#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -6672,7 +7181,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN check the next protocol and store the tag */
- } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
@@ -6684,9 +7193,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
}
- skb_tx_timestamp(skb);
-
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+ &adapter->state))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
@@ -6696,6 +7205,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
schedule_work(&adapter->ptp_tx_work);
}
+ skb_tx_timestamp(skb);
+
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -6714,8 +7225,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
struct vlan_ethhdr *vhdr;
- if (skb_header_cloned(skb) &&
- pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+
+ if (skb_cow_head(skb, 0))
goto out_drop;
vhdr = (struct vlan_ethhdr *)skb->data;
vhdr->h_vlan_TCI = htons(tx_flags >>
@@ -6731,7 +7242,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
- if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+ if ((protocol == htons(ETH_P_FCOE)) &&
(tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
@@ -6767,8 +7278,9 @@ out_drop:
return NETDEV_TX_OK;
}
-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
+static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev,
+ struct ixgbe_ring *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
@@ -6784,10 +7296,17 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
skb_set_tail_pointer(skb, 17);
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
+ tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+
return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}
+static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ return __ixgbe_xmit_frame(skb, netdev, NULL);
+}
+
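The new ring parameter exists for the dfwd transmit path, which lives outside these hunks; a hypothetical sketch of such a caller (all names below are assumed, not taken from this patch):

	/* Hypothetical; the real dfwd hook is not shown in this diff. */
	static netdev_tx_t example_fwd_xmit(struct sk_buff *skb,
					    struct ixgbe_fwd_adapter *accel)
	{
		struct ixgbe_adapter *adapter = accel->real_adapter;
		unsigned int q = accel->tx_base_queue + skb->queue_mapping;

		return __ixgbe_xmit_frame(skb, adapter->netdev,
					  adapter->tx_ring[q]);
	}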
/**
* ixgbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -6800,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
- return 0;
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+ return ret > 0 ? 0 : ret;
}
static int
@@ -6845,7 +7365,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+ return ixgbe_ptp_set_ts_config(adapter, req);
+ case SIOCGHWTSTAMP:
+ return ixgbe_ptp_get_ts_config(adapter, req);
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -6936,10 +7458,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
@@ -6952,10 +7474,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -7044,6 +7566,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
+ bool pools;
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
@@ -7051,6 +7574,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
tc < MAX_TRAFFIC_CLASS))
return -EINVAL;
+ pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
+ if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
+ return -EBUSY;
+
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
@@ -7305,13 +7832,95 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
}
+static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = NULL;
+ struct ixgbe_adapter *adapter = netdev_priv(pdev);
+ unsigned int limit;
+ int pool, err;
+
+#ifdef CONFIG_RPS
+ if (vdev->num_rx_queues != vdev->num_tx_queues) {
+ netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
+ vdev->name);
+ return ERR_PTR(-EINVAL);
+ }
+#endif
+ /* Check for hardware restriction on number of rx/tx queues */
+ if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
+ vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
+ netdev_info(pdev,
+ "%s: Supports RX/TX Queue counts 1,2, and 4\n",
+ pdev->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+ adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+ (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
+ return ERR_PTR(-EBUSY);
+
+ fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+ if (!fwd_adapter)
+ return ERR_PTR(-ENOMEM);
+
+ pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
+ adapter->num_rx_pools++;
+ set_bit(pool, &adapter->fwd_bitmask);
+ limit = find_last_bit(&adapter->fwd_bitmask, 32);
+
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;
+
+ /* Force reinit of ring allocation with VMDQ enabled */
+ err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+ if (err)
+ goto fwd_add_err;
+ fwd_adapter->pool = pool;
+ fwd_adapter->real_adapter = adapter;
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ return fwd_adapter;
+fwd_add_err:
+ /* unwind counter and free adapter struct */
+ netdev_info(pdev,
+ "%s: dfwd hardware acceleration failed\n", vdev->name);
+ clear_bit(pool, &adapter->fwd_bitmask);
+ adapter->num_rx_pools--;
+ kfree(fwd_adapter);
+ return ERR_PTR(err);
+}
+
+static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
+{
+ struct ixgbe_fwd_adapter *fwd_adapter = priv;
+ struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
+ unsigned int limit;
+
+ clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
+ adapter->num_rx_pools--;
+
+ limit = find_last_bit(&adapter->fwd_bitmask, 32);
+ adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
+ ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
+ ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
+ netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+ fwd_adapter->pool, adapter->num_rx_pools,
+ fwd_adapter->rx_base_queue,
+ fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
+ adapter->fwd_bitmask);
+ kfree(fwd_adapter);
+}
+
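For context, the macvlan core is the consumer of these two hooks; paraphrased (an assumption about the upper layer, not part of this patch), it keeps the returned cookie in fwd_priv:

	/* Paraphrased macvlan-side usage, not from this patch. */
	fwd_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
	if (IS_ERR(fwd_priv))
		return PTR_ERR(fwd_priv);	/* e.g. -EBUSY: pools exhausted */
	vlan->fwd_priv = fwd_priv;		/* later tested in ixgbe_down() */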
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
-#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
-#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7322,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_do_ioctl = ixgbe_ioctl,
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
@@ -7349,6 +7958,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_add = ixgbe_ndo_fdb_add,
.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
+ .ndo_dfwd_add_station = ixgbe_fwd_add,
+ .ndo_dfwd_del_station = ixgbe_fwd_del,
};
/**
@@ -7362,19 +7973,16 @@ static const struct net_device_ops ixgbe_netdev_ops = {
**/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *entry;
int physfns = 0;
- /* Some cards can not use the generic count PCIe functions method, and
- * so must be hardcoded to the correct value.
+ /* Some cards cannot use the generic method of counting PCIe functions,
+ * because they are behind a parent switch, so we hardcode these with
+ * the correct number of functions.
*/
- switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_SFP_SF_QP:
- case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ if (ixgbe_pcie_from_parent(&adapter->hw)) {
physfns = 4;
- break;
- default:
+ } else {
list_for_each(entry, &adapter->pdev->bus_list) {
struct pci_dev *pdev =
list_entry(entry, struct pci_dev, bus_list);
@@ -7408,6 +8016,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_DEV_ID_82599_SFP:
/* Only these subdevices support WOL */
switch (subdevice_id) {
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
/* only support first port */
if (hw->bus.func != 0)
@@ -7490,19 +8099,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
@@ -7548,6 +8152,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -7567,6 +8172,10 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* EEPROM */
memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+ if (ixgbe_removed(hw->hw_addr)) {
+ err = -EIO;
+ goto err_ioremap;
+ }
/* If EEPROM is valid (bit 8 = 1), use default, otherwise use bit bang */
if (!(eec & (1 << 8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
@@ -7589,10 +8198,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /* Cache if MNG FW is up so we don't have to read the REG later */
- if (hw->mac.ops.mng_fw_enabled)
- hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
-
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -7639,8 +8244,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
- pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:
#endif
@@ -7655,7 +8260,7 @@ skip_sriov:
NETIF_F_RXHASH |
NETIF_F_RXCSUM;
- netdev->hw_features = netdev->features;
+ netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -7730,10 +8335,17 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
+ if (ixgbe_removed(hw->hw_addr)) {
+ err = -EIO;
+ goto err_sw_init;
+ }
INIT_WORK(&adapter->service_task, ixgbe_service_task);
+ set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
err = ixgbe_init_interrupt_scheme(adapter);
@@ -7759,29 +8371,6 @@ skip_sriov:
if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
- /* print bus type/speed/width info */
- e_dev_info("(PCI Express:%s:%s) %pM\n",
- (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
- hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
- "Unknown"),
- (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
- hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
- hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
- "Unknown"),
- netdev->dev_addr);
-
- err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
- if (err)
- strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
- if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
- else
- e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
- hw->mac.type, hw->phy.type, part_str);
-
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
* bandwidth due to being older generation PCIe parts. We clamp these
@@ -7797,6 +8386,19 @@ skip_sriov:
}
ixgbe_check_minimum_link(adapter, expected_gts);
+ err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+ if (err)
+ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+ e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+ part_str);
+ else
+ e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+ hw->mac.type, hw->phy.type, part_str);
+
+ e_dev_info("%pM\n", netdev->dev_addr);
+
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
if (err == IXGBE_ERR_EEPROM_VERSION) {
@@ -7852,8 +8454,8 @@ skip_sriov:
ixgbe_dbg_adapter_init(adapter);
- /* Need link setup for MNG FW, else wait for IXGBE_UP */
- if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+ /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
+ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw,
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
@@ -7866,7 +8468,8 @@ err_register:
err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
+ kfree(adapter->mac_table);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -7874,7 +8477,8 @@ err_alloc_etherdev:
pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return err;
}
@@ -7894,7 +8498,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
ixgbe_dbg_adapter_exit(adapter);
- set_bit(__IXGBE_DOWN, &adapter->state);
+ set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
@@ -7933,17 +8537,19 @@ static void ixgbe_remove(struct pci_dev *pdev)
kfree(adapter->ixgbe_ieee_ets);
#endif
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM));
e_dev_info("complete\n");
+ kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
}
/**
@@ -7961,6 +8567,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = adapter->netdev;
#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *bdev, *vfdev;
u32 dw0, dw1, dw2, dw3;
int vf, pos;
@@ -7981,10 +8588,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (!pos)
goto skip_bad_vf_detection;
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+ dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+ dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+ dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+ dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+ if (ixgbe_removed(hw->hw_addr))
+ goto skip_bad_vf_detection;
req_id = dw1 >> 16;
/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8047,14 +8656,23 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
+ if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
ixgbe_down(adapter);
- pci_disable_device(pdev);
+
+ if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -8076,6 +8694,9 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
e_err(probe, "Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
+ smp_mb__before_atomic();
+ clear_bit(__IXGBE_DISABLED, &adapter->state);
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index d4a64e66539..1918e0abf73 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -27,8 +28,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
-#include "ixgbe_type.h"
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_mbx.h"
/**
@@ -223,7 +223,7 @@ out:
* received an ack to that message within delay * timeout period
**/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -407,7 +407,7 @@ out_no_write:
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index e44ff47659b..a5cb755de3a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -53,11 +54,11 @@
* Message ACK's are the value or'd with 0xF0000000
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
+ * this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
+ * this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
+ clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e4c676006be..ff68b7a9def 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -29,7 +30,7 @@
#include <linux/delay.h>
#include <linux/sched.h>
-#include "ixgbe_common.h"
+#include "ixgbe.h"
#include "ixgbe_phy.h"
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
@@ -46,6 +47,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_identify_phy_generic - Get physical layer module
@@ -65,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
+ ixgbe_get_phy_type_from_id(hw->phy.id);
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.ops.read_reg(hw,
@@ -97,6 +99,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability. For MACs that don't
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
+ **/
+bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ hw_dbg(hw, "MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
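Every PHY reset or renegotiation path below gates itself on this veto before disturbing the link, in the form:

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		goto out;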
+/**
* ixgbe_get_phy_id - Get the phy type
* @hw: pointer to hardware structure
*
@@ -108,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
- &phy_id_high);
+ &phy_id_high);
if (status == 0) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
- &phy_id_low);
+ &phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
@@ -171,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
(IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
goto out;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -286,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
+ u32 device_type, u16 *phy_data)
{
s32 status;
u16 gssr;
@@ -389,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @phy_data: Data to write to the PHY register
**/
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+ u32 device_type, u16 phy_data)
{
s32 status;
u16 gssr;
@@ -475,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -500,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
- hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+ hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
}
return status;
@@ -512,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
/*
@@ -546,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the link capabilities by reading the AUTOC register.
*/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_ERR_LINK_SETUP;
u16 speed_ability;
@@ -556,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*autoneg = true;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
+ &speed_ability);
if (status == 0) {
if (speed_ability & MDIO_SPEED_10G)
@@ -681,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -705,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
- hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+ hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
}
return status;
@@ -758,15 +798,19 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 i;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
/* reset the PHY and poll for completion */
hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- (phy_data | MDIO_CTRL1_RESET));
+ (phy_data | MDIO_CTRL1_RESET));
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- &phy_data);
+ &phy_data);
if ((phy_data & MDIO_CTRL1_RESET) == 0)
break;
usleep_range(10000, 20000);
@@ -780,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* Get init offsets */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto out;
@@ -794,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -815,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
- MDIO_MMD_PMAPMD, eword);
+ MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
phy_offset);
data_offset++;
@@ -966,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core0;
+ ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core1;
+ ixgbe_sfp_type_da_cu_core1;
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -991,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
+ ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
+ ixgbe_sfp_type_srlr_core1;
} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1043,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
if (status != 0)
goto err_read_i2c_eeprom;
@@ -1131,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = 0;
} else {
if (hw->allow_unsupported_sfp) {
- e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
+ e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = 0;
} else {
hw_dbg(hw,
@@ -1164,7 +1208,7 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
@@ -1359,8 +1403,8 @@ err_read_i2c_eeprom:
* so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
+ u16 *list_offset,
+ u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
@@ -1449,11 +1493,11 @@ err_phy:
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1481,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data)
+ u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1498,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* a specified device address.
**/
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
s32 status = 0;
u32 max_retry = 10;
@@ -1587,7 +1631,7 @@ read_byte_out:
* a specified device address.
**/
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
s32 status = 0;
u32 max_retry = 1;
@@ -2002,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
/* Check that the LASI temp alarm status was triggered */
hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
+ MDIO_MMD_PMAPMD, &phy_data);
if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 24af12e3719..54071ed17e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -57,29 +58,25 @@
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
-#define IXGBE_SFF_1GBASET_CAPABLE 0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
-#define IXGBE_SFF_ADDRESSING_MODE 0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
+#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
-#define IXGBE_I2C_EEPROM_READ_MASK 0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
-
+#define IXGBE_I2C_EEPROM_READ_MASK 0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@@ -117,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
+ u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
+ u32 device_type, u16 phy_data);
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
+ ixgbe_link_speed *speed,
+ bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
+ u16 *list_offset,
+ u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data);
+ u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a1a7d..68f87ecb8a7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -20,12 +20,12 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
-#include <linux/export.h>
#include <linux/ptp_classify.h>
/*
@@ -333,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * ixgbe_ptp_enable
+ * ixgbe_ptp_feature_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
@@ -341,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
*/
-static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -434,10 +434,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_ring *rx_ring;
u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
unsigned long rx_event;
- int n;
/* if we don't have a valid timestamp in the registers, just update the
* timeout counter and exit
@@ -449,18 +447,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
/* determine the most recent watchdog or rx_timestamp event */
rx_event = adapter->last_rx_ptp_check;
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
- }
+ if (time_after(adapter->last_rx_timestamp, rx_event))
+ rx_event = adapter->last_rx_timestamp;
/* only need to read the high RXSTMP register to clear the lock */
if (time_is_before_jiffies(rx_event + 5*HZ)) {
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
adapter->last_rx_ptp_check = jiffies;
- e_warn(drv, "clearing RX Timestamp hang");
+ e_warn(drv, "clearing RX Timestamp hang\n");
}
}
@@ -492,6 +487,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
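The clear_bit_unlock() calls added in this file pair with a test_and_set_bit_lock() on the transmit side: only one outgoing packet may own the Tx timestamp registers at a time, and every path that frees ptp_tx_skb must also release the bit. A rough sketch of that ownership pattern, assuming the xmit-side details (which live in ixgbe_main.c and may differ):

	/* xmit path: claim the single Tx timestamp slot (sketch) */
	if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		adapter->ptp_tx_skb = skb_get(skb);
		schedule_work(&adapter->ptp_tx_work);
	}

	/* completion or timeout path: free the skb, then release the slot */
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
	clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);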
/**
@@ -511,14 +507,11 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
IXGBE_PTP_TX_TIMEOUT);
u32 tsynctxctl;
- /* we have to have a valid skb */
- if (!adapter->ptp_tx_skb)
- return;
-
if (timeout) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
- e_warn(drv, "clearing Tx Timestamp hang");
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+ e_warn(drv, "clearing Tx Timestamp hang\n");
return;
}
@@ -531,35 +524,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
}
/**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
+ struct ixgbe_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps *shhwtstamps;
u64 regval = 0, ns;
u32 tsyncrxctl;
unsigned long flags;
- /* we cannot process timestamps on a ring without a q_vector */
- if (!q_vector || !q_vector->adapter)
- return;
-
- adapter = q_vector->adapter;
- hw = &adapter->hw;
-
- /*
- * Read the tsyncrxctl register afterwards in order to prevent taking an
- * I/O hit on every packet.
- */
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
@@ -567,23 +547,34 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
-
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for the error case of a latched timestamp on a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
+}
+
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
}
/**
- * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
- * @adapter: pointer to adapter struct
- * @ifreq: ioctl data
- * @cmd: particular ioctl requested
+ * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private ixgbe adapter structure
+ * @config: the hwtstamp configuration requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
+ * disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
@@ -598,26 +589,25 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* packets, regardless of the type specified in the register, only use V2
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
*/
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd)
+static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ struct hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -626,7 +616,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
@@ -650,7 +640,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
@@ -661,7 +651,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -680,7 +670,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
else
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
-
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -702,6 +691,33 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ *
+ * Set hardware to requested mode. If unsupported, return an error with no
+ * changes. Otherwise, store the mode for future reference.
+ */
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
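ixgbe_ptp_set_ts_config() and ixgbe_ptp_get_ts_config() back the SIOCSHWTSTAMP and SIOCGHWTSTAMP ioctls (the latter requires a kernel that implements it). A minimal user-space sketch of driving them over a datagram socket, with a hypothetical interface name and error handling trimmed:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* request PTPv2 event Rx filtering and Tx timestamping on "eth0";
	 * fd is a socket, e.g. socket(AF_INET, SOCK_DGRAM, 0)
	 */
	static int example_enable_hwtstamp(int fd)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)	/* driver may widen rx_filter */
			return -1;

		return ioctl(fd, SIOCGHWTSTAMP, &ifr);	/* read back the active mode */
	}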
@@ -795,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
- * When the MAC resets, all timesync features are reset. This function should be
- * called to re-enable the PTP clock structure. It will re-init the timecounter
- * structure based on the kernel time as well as setup the cycle counter data.
+ * When the MAC resets, all the hardware bits for timesync are reset. This
+ * function is used to re-enable the device for PTP based on current settings.
+ * We do lose the current clock time, so just reset the cyclecounter to the
+ * system real clock time.
+ *
+ * This function will maintain hwtstamp_config settings, and resets the SDP
+ * output if it was enabled.
*/
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
{
@@ -809,6 +829,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
+ /* reset the hardware timestamping mode */
+ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+
ixgbe_ptp_start_cyclecounter(adapter);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -827,20 +850,29 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_init
+ * ixgbe_ptp_create_clock
* @adapter: the ixgbe private adapter structure
*
- * This function performs the required steps for enabling ptp
- * support. If ptp support has already been loaded it simply calls the
- * cyclecounter init routine and exits.
+ * This function performs setup of the user entry point function table and
+ * initializes the PTP clock device, which is used to access the clock-like
+ * features of the PTP core. It will be called by ixgbe_ptp_init, only if
+ * there isn't already a clock device (such as after a suspend/resume cycle,
+ * where the clock device wasn't destroyed).
*/
-void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+ return 0;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -851,10 +883,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -865,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
default:
adapter->ptp_clock = NULL;
- return;
+ return -EOPNOTSUPP;
}
- spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
+ err = PTR_ERR(adapter->ptp_clock);
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
+ return err;
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ /* set default timestamp mode to disabled here. We do this in
+ * create_clock instead of init, because we don't want to override the
+ * previous settings during a resume cycle.
+ */
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
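Once ptp_clock_register() succeeds, the PHC shows up as /dev/ptpN and can be read like any other POSIX clock. A short user-space sketch, assuming the device node index (it depends on registration order):

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD			3
	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		int fd = open("/dev/ptp0", O_RDWR);	/* hypothetical index */
		struct timespec ts;

		if (fd >= 0 && !clock_gettime(FD_TO_CLOCKID(fd), &ts))
			printf("PHC time: %lld.%09ld\n",
			       (long long)ts.tv_sec, ts.tv_nsec);
		if (fd >= 0)
			close(fd);
		return 0;
	}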
+
+/**
+ * ixgbe_ptp_init
+ * @adapter: the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling PTP
+ * support. If PTP support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+ /* initialize the spin lock first since we can't control when a user
+ * will call the entry functions once we have initialized the clock
+ * device
+ */
+ spin_lock_init(&adapter->tmreg_lock);
+
+ /* obtain a PTP device, or re-use an existing device */
+ if (ixgbe_ptp_create_clock(adapter))
+ return;
+
+ /* we have a clock so we can initialize work now */
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
+
+ /* reset the PTP related hardware bits */
ixgbe_ptp_reset(adapter);
/* enter the IXGBE_PTP_RUNNING state */
@@ -892,27 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_stop - disable ptp device and stop the overflow check
- * @adapter: pointer to adapter struct
+ * ixgbe_ptp_suspend - stop PTP work items
+ * @adapter: pointer to adapter struct
*
- * this function stops the ptp support, and cancels the delayed work.
+ * this function suspends PTP activity, and prevents more PTP work from being
+ * generated, but does not destroy the PTP clock device.
*/
-void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
{
/* Leave the IXGBE_PTP_RUNNING state. */
if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
return;
- /* stop the PPS signal */
- adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
- ixgbe_ptp_setup_sdp(adapter);
+ /* since this might be called in suspend, we don't clear the state,
+ * but simply reset the auxiliary PPS signal control register
+ */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
+ /* ensure that we cancel any pending PTP Tx work item in progress */
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * ixgbe_ptp_stop - close the PTP device
+ * @adapter: pointer to adapter struct
+ *
+ * Completely destroy the PTP device; this should only be called when the
+ * device is being fully closed.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+ /* first, suspend PTP activity */
+ ixgbe_ptp_suspend(adapter);
+ /* disable the PTP clock device */
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 276d7b13533..16b3a1cd9db 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -71,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
@@ -129,10 +128,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
if (!pre_existing_vfs && !adapter->num_vfs)
return;
- if (!pre_existing_vfs)
- dev_warn(&adapter->pdev->dev,
- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
-
/* If there are pre-existing VFs then we have to force
* use of that many - override any module parameter value.
* This may result from the user unloading the PF driver
@@ -152,7 +147,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
* physical function. If the user requests greater than
* 63 VFs then it is an error - reset to default of zero.
*/
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
if (err) {
@@ -223,17 +218,19 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
IXGBE_WRITE_FLUSH(hw);
/* Disable VMDq flag so device will be set in VM mode */
- if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
- adapter->ring_feature[RING_F_VMDQ].offset = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ } else {
+ rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus());
+ }
- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_VMDQ].offset = 0;
adapter->ring_feature[RING_F_RSS].limit = rss;
/* take a breather then clean up driver data */
msleep(100);
-
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
return 0;
}
@@ -259,7 +256,7 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
* PF. The PCI bus driver already checks for other values out of
* range.
*/
- if (num_vfs > 63) {
+ if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
err = -EPERM;
goto err_out;
}
@@ -293,18 +290,17 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err;
+#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+#endif
err = ixgbe_disable_sriov(adapter);
/* Only reinit if no error and state changed */
- if (!err && current_flags != adapter->flags) {
- /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
- adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
#ifdef CONFIG_PCI_IOV
+ if (!err && current_flags != adapter->flags)
ixgbe_sriov_reinit(adapter);
#endif
- }
return err;
}
@@ -329,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -355,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
return 0;
}
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (!entry->free)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -384,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
@@ -393,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
}
/* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
+ ixgbe_full_sync_mac_table(adapter);
}
+#endif
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
@@ -497,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
+ vmolr |= IXGBE_VMOLR_BAM;
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
@@ -516,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
u8 num_tcs = netdev_get_num_tc(adapter->netdev);
/* add PF assigned VLAN or VLAN 0 */
@@ -546,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- hw->mac.ops.clear_rar(hw, rar_entry);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -555,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
return 0;
}
@@ -567,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
@@ -578,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ ixgbe_del_mac_filter(adapter,
+ entry->vf_macvlan, vf);
}
}
}
@@ -614,34 +603,34 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, mac_addr, vf);
return 0;
}
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
- unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
bool enable = ((event_mask & 0x10000000U) != 0);
- if (enable) {
- eth_zero_addr(vf_mac_addr);
- memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
- }
+ if (enable)
+ eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
return 0;
}
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
u32 reg, reg_offset, vf_shift;
u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
+ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ int i;
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -660,6 +649,17 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* force drop enable for all VF Rx queues */
+ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) {
+ /* flush previous write */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* indicate to hardware that we want to set drop enable */
+ reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
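The QDE register used in the loop above is index/value style: IXGBE_QDE_WRITE latches the write, IXGBE_QDE_ENABLE sets drop-enable, and the queue to act on is selected by the index field. The same encoding written as a small helper (sketch only, mirroring the loop body):

	/* sketch: force drop-enable for one Rx queue via the QDE register */
	static void example_set_qde(struct ixgbe_hw *hw, u32 queue)
	{
		u32 reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE;

		reg |= queue << IXGBE_QDE_IDX_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
	}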
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= 1 << vf_shift;
@@ -690,6 +690,15 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
+ /*
+ * Reset the VF's TDWBAL and TDWBAH registers,
+ * which are not cleared by an FLR
+ */
+ for (i = 0; i < q_per_pool; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0);
+ }
+
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET;
if (!is_zero_ether_addr(vf_mac)) {
@@ -723,8 +732,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
}
if (adapter->vfinfo[vf].pf_set_mac &&
- memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
- ETH_ALEN)) {
+ !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
"Reload the VF driver to resume operations\n",
@@ -1121,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
adapter->vfinfo[vf].vlan_count--;
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
- }
+ }
out:
- return err;
+ return err;
}
static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1214,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
}
}
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int link_speed;
@@ -1232,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (link_speed != 10000)
return -EINVAL;
+ if (min_tx_rate)
+ return -EINVAL;
+
/* rate limit cannot be less than 10Mbs or greater than link speed */
- if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
return -EINVAL;
/* store values */
adapter->vf_rate_link_speed = link_speed;
- adapter->vfinfo[vf].tx_rate = tx_rate;
+ adapter->vfinfo[vf].tx_rate = max_tx_rate;
/* update hardware configuration */
ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1280,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4713f9fc7f4..32c26d586c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,7 +29,14 @@
#ifndef _IXGBE_SRIOV_H_
#define _IXGBE_SRIOV_H_
+/* The ixgbe driver limits the max number of VFs that can be enabled to
+ * 63 (IXGBE_MAX_VF_FUNCTIONS - 1)
+ */
+#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -36,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index d118def16f3..ef6df3d6437 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -111,29 +112,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
unsigned int n_attr;
struct hwmon_attr *ixgbe_attr;
- n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
- ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
+ n_attr = adapter->ixgbe_hwmon_buff->n_hwmon;
+ ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr];
switch (type) {
case IXGBE_HWMON_TYPE_LOC:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_label", offset);
+ "temp%u_label", offset + 1);
break;
case IXGBE_HWMON_TYPE_TEMP:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_input", offset);
+ "temp%u_input", offset + 1);
break;
case IXGBE_HWMON_TYPE_CAUTION:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_max", offset);
+ "temp%u_max", offset + 1);
break;
case IXGBE_HWMON_TYPE_MAX:
ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
- "temp%u_crit", offset);
+ "temp%u_crit", offset + 1);
break;
default:
rc = -EPERM;
@@ -147,32 +148,17 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
ixgbe_attr->dev_attr.store = NULL;
ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
+ sysfs_attr_init(&ixgbe_attr->dev_attr.attr);
- rc = device_create_file(&adapter->pdev->dev,
- &ixgbe_attr->dev_attr);
+ adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr;
- if (rc == 0)
- ++adapter->ixgbe_hwmon_buff.n_hwmon;
+ ++adapter->ixgbe_hwmon_buff->n_hwmon;
- return rc;
+ return 0;
}
static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
{
- int i;
-
- if (adapter == NULL)
- return;
-
- for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
- device_remove_file(&adapter->pdev->dev,
- &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
- }
-
- kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
-
- if (adapter->ixgbe_hwmon_buff.device)
- hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
}
/* called from ixgbe_main.c */
@@ -184,9 +170,9 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
/* called from ixgbe_main.c */
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
{
- struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
+ struct hwmon_buff *ixgbe_hwmon;
+ struct device *hwmon_dev;
unsigned int i;
- int n_attrs;
int rc = 0;
/* If this method isn't defined we don't support thermals */
@@ -198,23 +184,13 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw))
goto exit;
- /*
- * Allocation space for max attributs
- * max num sensors * values (loc, temp, max, caution)
- */
- n_attrs = IXGBE_MAX_SENSORS * 4;
- ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
- if (!ixgbe_hwmon->hwmon_list) {
+ ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon),
+ GFP_KERNEL);
+ if (ixgbe_hwmon == NULL) {
rc = -ENOMEM;
- goto err;
- }
-
- ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
- if (IS_ERR(ixgbe_hwmon->device)) {
- rc = PTR_ERR(ixgbe_hwmon->device);
- goto err;
+ goto exit;
}
+ adapter->ixgbe_hwmon_buff = ixgbe_hwmon;
for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
/*
@@ -226,17 +202,28 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
/* Bail if any hwmon attr struct fails to initialize */
rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
- rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
if (rc)
- goto err;
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
+ if (rc)
+ goto exit;
+ rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
+ if (rc)
+ goto exit;
}
- goto exit;
+ ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group;
+ ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs;
-err:
- ixgbe_sysfs_del_adapter(adapter);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+ "ixgbe",
+ ixgbe_hwmon,
+ ixgbe_hwmon->groups);
+ if (IS_ERR(hwmon_dev))
+ rc = PTR_ERR(hwmon_dev);
exit:
return rc;
}
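devm_hwmon_device_register_with_groups() takes an already-built attribute_group array and tears everything down automatically on driver removal, which is why the explicit ixgbe_sysfs_del_adapter() cleanup above becomes empty. A minimal sketch of the same registration pattern outside ixgbe, with hypothetical names and the attribute pointers assumed to be filled in at init time:

	static struct attribute *example_attrs[] = {
		/* &example_attr[0].dev_attr.attr, ... populated before registration */
		NULL,
	};

	static const struct attribute_group example_group = {
		.attrs = example_attrs,
	};

	static const struct attribute_group *example_groups[] = {
		&example_group,
		NULL,
	};

	/* in probe(): */
	hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, "example",
							   drvdata, example_groups);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);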
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 10775cb9b6d..9a89f98b35f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -54,6 +55,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
@@ -158,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MAX_EITR 0x00000FF8
#define IXGBE_MIN_EITR 8
#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
+ (0x012300 + (((_i) - 24) * 4)))
#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
#define IXGBE_EITR_LLI_MOD 0x00008000
#define IXGBE_EITR_CNT_WDIS 0x80000000
@@ -211,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D014 + (n-64)*0x40
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
(0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
@@ -220,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D00C + (n-64)*0x40
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
- /* 8 of these 0x03C00 - 0x03C1C */
+ /* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_RXCTRL 0x03000
#define IXGBE_DROPEN 0x03D04
#define IXGBE_RXPBSIZE_SHIFT 10
@@ -237,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
+ (0x0A200 + ((_i) * 8)))
#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
+ (0x0A204 + ((_i) * 8)))
#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
/* Packet split receive type */
#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
@@ -561,6 +563,10 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTDQSEL 0x04904
#define IXGBE_RTTDT1C 0x04908
#define IXGBE_RTTDT1S 0x0490C
+#define IXGBE_RTTQCNCR 0x08B00
+#define IXGBE_RTTQCNTG 0x04A90
+#define IXGBE_RTTBCNRD 0x0498C
+#define IXGBE_RTTQCNRR 0x0498C
#define IXGBE_RTTDTECC 0x04990
#define IXGBE_RTTDTECC_NO_BCN 0x00000100
#define IXGBE_RTTBCNRC 0x04984
@@ -570,6 +576,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
#define IXGBE_RTTBCNRM 0x04980
+#define IXGBE_RTTQCNRM 0x04980
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@@ -689,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
- (0x08600 + ((_i) * 4)))
+ (0x08600 + ((_i) * 4)))
#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -813,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
- IXGBE_GCR_EXT_VT_MODE_64)
+ IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1389,10 +1396,10 @@ enum {
#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_LSC | \
- IXGBE_EIMS_TCP_TIMER | \
- IXGBE_EIMS_OTHER)
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -1604,6 +1611,9 @@ enum {
#define IXGBE_MACC_FS 0x00040000
#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1783,6 +1793,9 @@ enum {
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */
+
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
@@ -1848,8 +1861,19 @@ enum {
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
+#define IXGBE_PCIDEVCTRL2_50_100us 0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
+#define IXGBE_PCIDEVCTRL2_1_2s 0xa
+#define IXGBE_PCIDEVCTRL2_4_8s 0xd
+#define IXGBE_PCIDEVCTRL2_17_34s 0xe
+
/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -1975,9 +1999,10 @@ enum {
#define IXGBE_FWSM_TS_ENABLED 0x1
/* Queue Drop Enable */
-#define IXGBE_QDE_ENABLE 0x00000001
-#define IXGBE_QDE_IDX_MASK 0x00007F00
-#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_ENABLE 0x00000001
+#define IXGBE_QDE_IDX_MASK 0x00007F00
+#define IXGBE_QDE_IDX_SHIFT 8
+#define IXGBE_QDE_WRITE 0x00010000
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
@@ -2136,18 +2161,18 @@ enum {
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXD_ERR_CE | \
- IXGBE_RXD_ERR_LE | \
- IXGBE_RXD_ERR_PE | \
- IXGBE_RXD_ERR_OSE | \
- IXGBE_RXD_ERR_USE)
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXDADV_ERR_CE | \
- IXGBE_RXDADV_ERR_LE | \
- IXGBE_RXDADV_ERR_PE | \
- IXGBE_RXDADV_ERR_OSE | \
- IXGBE_RXDADV_ERR_USE)
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2168,6 +2193,14 @@ enum {
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
+/* Translated register #defines */
+#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
+#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+
+#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
+#define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \
+ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index)))
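The PVFTDWBALn/PVFTDWBAHn helpers simply translate a per-VF queue index into the absolute pool queue used by the flat PVFTDWBAL/PVFTDWBAH register arrays. The arithmetic written out (sketch, derived from the macros above):

	/* absolute queue = queues-per-pool * VF number + queue index within the VF */
	u32 abs_q = q_per_pool * vf_number + vf_q_index;

	/* so IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) resolves to
	 * IXGBE_PVFTDWBAL(abs_q), i.e. register offset 0x06038 + 0x40 * abs_q
	 */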
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
@@ -2360,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2402,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_10GB_FULL)
#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
- IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
@@ -2631,7 +2664,6 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
- ixgbe_media_type_fiber_fixed,
ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
@@ -2714,7 +2746,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
@@ -2808,7 +2840,7 @@ struct ixgbe_hw;
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
+ u32 *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
@@ -2844,6 +2876,8 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2853,7 +2887,7 @@ struct ixgbe_mac_operations {
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
+ bool *);
/* Packet Buffer Manipulation */
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
@@ -2887,7 +2921,6 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
- bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
};
struct ixgbe_phy_operations {
@@ -2943,7 +2976,6 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
- u32 cached_autoc;
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
@@ -3019,7 +3051,6 @@ struct ixgbe_hw {
bool adapter_stopped;
bool force_full_reset;
bool allow_unsupported_sfp;
- bool mng_fw_enabled;
bool wol_enabled;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 389324f5929..40dd798e129 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -20,6 +20,7 @@
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -32,12 +33,12 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES 128
-#define IXGBE_X540_MC_TBL_SIZE 128
-#define IXGBE_X540_VFT_TBL_SIZE 128
-#define IXGBE_X540_RX_PB_SIZE 384
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES 128
+#define IXGBE_X540_MC_TBL_SIZE 128
+#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
@@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -79,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
}
/**
@@ -153,7 +155,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -164,7 +166,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
goto out;
ret_val = ixgbe_start_hw_gen2(hw);
- hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
out:
return ret_val;
}
@@ -236,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -711,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
udelay(50);
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -812,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_X540,
.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540,
+ &ixgbe_get_supported_physical_layer_X540,
.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
@@ -854,7 +854,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
- .mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 3147795bd13..05e4f32d84f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
@@ -277,4 +278,21 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
+
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c9d0c12d6f0..d420f124633 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,16 +45,27 @@
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
+ struct {
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+ int saved_reset_offset;
+ };
};
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
- offsetof(struct ixgbevf_adapter, m), \
- offsetof(struct ixgbevf_adapter, b), \
- offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+ .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, m), \
+ .base_stat_offset = -1, \
+ .saved_reset_offset = -1 \
+}
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
stats.saved_reset_vfgorc)},
{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+ {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+ {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+ {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
stats.saved_reset_vfmprc)},
- {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
- zero_base)},
- {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
- zero_base)},
- {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
- zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+#ifdef BP_EXTENDED_STATS
+ {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+ {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+ {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+ {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+ {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+ {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
};
#define IXGBE_QUEUE_STATS_LEN 0
@@ -119,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -140,58 +156,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-static char *ixgbevf_reg_names[] = {
- "IXGBE_VFCTRL",
- "IXGBE_VFSTATUS",
- "IXGBE_VFLINKS",
- "IXGBE_VFRXMEMWRAP",
- "IXGBE_VFFRTIMER",
- "IXGBE_VTEICR",
- "IXGBE_VTEICS",
- "IXGBE_VTEIMS",
- "IXGBE_VTEIMC",
- "IXGBE_VTEIAC",
- "IXGBE_VTEIAM",
- "IXGBE_VTEITR",
- "IXGBE_VTIVAR",
- "IXGBE_VTIVAR_MISC",
- "IXGBE_VFRDBAL0",
- "IXGBE_VFRDBAL1",
- "IXGBE_VFRDBAH0",
- "IXGBE_VFRDBAH1",
- "IXGBE_VFRDLEN0",
- "IXGBE_VFRDLEN1",
- "IXGBE_VFRDH0",
- "IXGBE_VFRDH1",
- "IXGBE_VFRDT0",
- "IXGBE_VFRDT1",
- "IXGBE_VFRXDCTL0",
- "IXGBE_VFRXDCTL1",
- "IXGBE_VFSRRCTL0",
- "IXGBE_VFSRRCTL1",
- "IXGBE_VFPSRTYPE",
- "IXGBE_VFTDBAL0",
- "IXGBE_VFTDBAL1",
- "IXGBE_VFTDBAH0",
- "IXGBE_VFTDBAH1",
- "IXGBE_VFTDLEN0",
- "IXGBE_VFTDLEN1",
- "IXGBE_VFTDH0",
- "IXGBE_VFTDH1",
- "IXGBE_VFTDT0",
- "IXGBE_VFTDT1",
- "IXGBE_VFTXDCTL0",
- "IXGBE_VFTXDCTL1",
- "IXGBE_VFTDWBAL0",
- "IXGBE_VFTDWBAL1",
- "IXGBE_VFTDWBAH0",
- "IXGBE_VFTDWBAH1"
-};
-
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
- return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+ return IXGBE_REGS_LEN * sizeof(u32);
}
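The hard-coded length replaces the deleted name table; a quick sanity check of the constant (illustrative arithmetic, not part of the patch):

/* ixgbevf_get_regs() below last writes regs_buff[43 + 1], i.e.
 * regs_buff[44], so IXGBE_REGS_LEN == 45 covers exactly the
 * buffer indices 0..44.
 */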
static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +232,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
for (i = 0; i < 2; i++)
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
- for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
- hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
@@ -321,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
@@ -338,20 +303,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_tx_queues; i++) {
/* clone ring and setup updated count */
- tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i] = *adapter->tx_ring[i];
tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
- }
+ err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(&tx_ring[i]);
+ }
- vfree(tx_ring);
- tx_ring = NULL;
+ vfree(tx_ring);
+ tx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -364,20 +329,20 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
/* clone ring and setup updated count */
- rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i] = *adapter->rx_ring[i];
rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (!err)
- continue;
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
+ err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(&rx_ring[i]);
+ }
- vfree(rx_ring);
- rx_ring = NULL;
+ vfree(rx_ring);
+ rx_ring = NULL;
- goto clear_reset;
+ goto clear_reset;
+ }
}
}
@@ -387,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Tx */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
- adapter->tx_ring[i] = tx_ring[i];
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
+ *adapter->tx_ring[i] = tx_ring[i];
}
adapter->tx_ring_count = new_tx_count;
@@ -400,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
/* Rx */
if (rx_ring) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
- adapter->rx_ring[i] = rx_ring[i];
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
+ *adapter->rx_ring[i] = rx_ring[i];
}
adapter->rx_ring_count = new_rx_count;
@@ -417,7 +380,7 @@ clear_reset:
/* free Tx resources if Rx error is encountered */
if (tx_ring) {
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ ixgbevf_free_tx_resources(&tx_ring[i]);
vfree(tx_ring);
}
@@ -441,22 +404,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ char *base = (char *) adapter;
int i;
+#ifdef BP_EXTENDED_STATS
+ u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+ tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_yields += adapter->rx_ring[i]->stats.yields;
+ rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+ rx_missed += adapter->rx_ring[i]->stats.misses;
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_yields += adapter->tx_ring[i]->stats.yields;
+ tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+ tx_missed += adapter->tx_ring[i]->stats.misses;
+ }
+
+ adapter->bp_rx_yields = rx_yields;
+ adapter->bp_rx_cleaned = rx_cleaned;
+ adapter->bp_rx_missed = rx_missed;
+
+ adapter->bp_tx_yields = tx_yields;
+ adapter->bp_tx_cleaned = tx_cleaned;
+ adapter->bp_tx_missed = tx_missed;
+#endif
ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = (char *)adapter +
- ixgbe_gstrings_stats[i].stat_offset;
- char *b = (char *)adapter +
- ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = (char *)adapter +
- ixgbe_gstrings_stats[i].saved_reset_offset;
- data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
- ((ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+ char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+ char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+ char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+ if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+ else
+ data[i] = *(u64 *)p;
+ } else {
+ if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+ data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+ else
+ data[i] = *(u32 *)p;
+ }
}
}
@@ -539,41 +530,55 @@ static const u32 register_test_patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
- u32 pat, val, before; \
- for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
- before = readl(adapter->hw.hw_addr + R); \
- writel((register_test_patterns[pat] & W), \
- (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if (val != (register_test_patterns[pat] & W & M)) { \
- hw_dbg(&adapter->hw, \
- "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (register_test_patterns[pat] & W & M)); \
- *data = R; \
- writel(before, adapter->hw.hw_addr + R); \
- return 1; \
- } \
- writel(before, adapter->hw.hw_addr + R); \
- } \
+static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 pat, val, before;
+
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
+ before = ixgbevf_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg,
+ register_test_patterns[pat] & write);
+ val = ixgbevf_read_reg(&adapter->hw, reg);
+ if (val != (register_test_patterns[pat] & write & mask)) {
+ hw_dbg(&adapter->hw,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+ reg, val,
+ register_test_patterns[pat] & write & mask);
+ *data = reg;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
+ }
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ }
+ return false;
}
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
- u32 val, before; \
- before = readl(adapter->hw.hw_addr + R); \
- writel((W & M), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if ((W & M) != (val & M)) { \
- pr_err("set/check reg %04X test failed: got 0x%08X expected " \
- "0x%08X\n", R, (val & M), (W & M)); \
- *data = R; \
- writel(before, (adapter->hw.hw_addr + R)); \
- return 1; \
- } \
- writel(before, (adapter->hw.hw_addr + R)); \
+static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
+ int reg, u32 mask, u32 write)
+{
+ u32 val, before;
+
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ *data = 1;
+ return true;
+ }
+ before = ixgbevf_read_reg(&adapter->hw, reg);
+ ixgbe_write_reg(&adapter->hw, reg, write & mask);
+ val = ixgbevf_read_reg(&adapter->hw, reg);
+ if ((write & mask) != (val & mask)) {
+ pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), write & mask);
+ *data = reg;
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return true;
+ }
+ ixgbe_write_reg(&adapter->hw, reg, before);
+ return false;
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
@@ -581,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
const struct ixgbevf_reg_test *test;
u32 i;
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter removed - register test blocked\n");
+ *data = 1;
+ return 1;
+ }
test = reg_test_vf;
/*
@@ -589,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
*/
while (test->reg) {
for (i = 0; i < test->array_len; i++) {
+ bool b = false;
+
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ b = reg_set_and_check(adapter, data,
+ test->reg + (i * 0x40),
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
- writel(test->write,
- (adapter->hw.hw_addr + test->reg)
- + (i * 0x40));
+ ixgbe_write_reg(&adapter->hw,
+ test->reg + (i * 0x40),
+ test->write);
break;
case TABLE32_TEST:
- REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 4),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
- REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + (i * 8),
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
- REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ b = reg_pattern_test(adapter, data,
+ test->reg + 4 + (i * 8),
+ test->mask,
+ test->write);
break;
}
+ if (b)
+ return 1;
}
test++;
}
@@ -635,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev,
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
+ if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter removed - test blocked\n");
+ data[0] = 1;
+ data[1] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
set_bit(__IXGBEVF_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
@@ -685,6 +713,85 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
return 0;
}
+static int ixgbevf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only valid if in constant ITR mode */
+ if (adapter->rx_itr_setting <= 1)
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting;
+ else
+ ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+ /* if in mixed tx/rx queues per vector mode, report only rx settings */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ return 0;
+
+ /* only valid if in constant ITR mode */
+ if (adapter->tx_itr_setting <= 1)
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+ else
+ ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+
+ return 0;
+}
+
+static int ixgbevf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_q_vector *q_vector;
+ int num_vectors, i;
+ u16 tx_itr_param, rx_itr_param;
+
+ /* don't accept tx specific changes if we've got mixed RxTx vectors */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
+ && ec->tx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
+ (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
+ return -EINVAL;
+
+ if (ec->rx_coalesce_usecs > 1)
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
+ else
+ adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ if (adapter->rx_itr_setting == 1)
+ rx_itr_param = IXGBE_20K_ITR;
+ else
+ rx_itr_param = adapter->rx_itr_setting;
+
+ if (ec->tx_coalesce_usecs > 1)
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+ else
+ adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (adapter->tx_itr_setting == 1)
+ tx_itr_param = IXGBE_10K_ITR;
+ else
+ tx_itr_param = adapter->tx_itr_setting;
+
+ num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < num_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (q_vector->tx.count && !q_vector->rx.count)
+ /* tx only */
+ q_vector->itr = tx_itr_param;
+ else
+ /* rx only or mixed */
+ q_vector->itr = rx_itr_param;
+ ixgbevf_write_eitr(q_vector);
+ }
+
+ return 0;
+}
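The shift pair encodes the user's microsecond value into the driver's internal ITR units; a minimal sketch of the round trip, assuming the constants named in this hunk:

/* Sketch: ethtool -C ethX rx-usecs 50
 *
 *   set: adapter->rx_itr_setting = 50 << 2;  == 200, internal units
 *   get: ec->rx_coalesce_usecs = 200 >> 2;   == 50 usecs reported back
 *
 * Values 0 and 1 are stored verbatim: 0 disables moderation and 1
 * selects the adaptive path (the rx_itr_setting & 1 test in
 * ixgbevf_poll()).
 */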
+
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
@@ -700,9 +807,11 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_sset_count = ixgbevf_get_sset_count,
.get_strings = ixgbevf_get_strings,
.get_ethtool_stats = ixgbevf_get_ethtool_stats,
+ .get_coalesce = ixgbevf_get_coalesce,
+ .set_coalesce = ixgbevf_set_coalesce,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+ netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fff0d986752..a0a1de9ce23 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -38,15 +38,23 @@
#include "vf.h"
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
union ixgbe_adv_tx_desc *next_to_watch;
- u16 length;
- u16 mapped_as_page;
+ unsigned long time_stamp;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
struct ixgbevf_rx_buffer {
@@ -54,6 +62,29 @@ struct ixgbevf_rx_buffer {
dma_addr_t dma;
};
+struct ixgbevf_stats {
+ u64 packets;
+ u64 bytes;
+#ifdef BP_EXTENDED_STATS
+ u64 yields;
+ u64 misses;
+ u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+ u64 csum_err;
+};
+
struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
@@ -65,26 +96,27 @@ struct ixgbevf_ring {
unsigned int next_to_use;
unsigned int next_to_clean;
- int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbevf_tx_buffer *tx_buffer_info;
struct ixgbevf_rx_buffer *rx_buffer_info;
};
- u64 total_bytes;
- u64 total_packets;
- struct u64_stats_sync syncp;
- u64 hw_csum_rx_error;
- u64 hw_csum_rx_good;
+ struct ixgbevf_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ixgbevf_tx_queue_stats tx_stats;
+ struct ixgbevf_rx_queue_stats rx_stats;
+ };
- u16 head;
- u16 tail;
+ u64 hw_csum_rx_error;
+ u8 __iomem *tail;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCB and RSS modes */
u16 rx_buf_len;
+ int queue_index; /* needed for multiqueue queue management */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -115,8 +147,6 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -145,7 +175,119 @@ struct ixgbevf_q_vector {
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE 0
+#define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+ spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
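The inline helpers that follow implement a small hand-rolled lock over ->state; a sketch of the intended transitions, as read from the flag definitions above:

/*
 *   IDLE --ixgbevf_qv_lock_napi()--> NAPI --unlock--> IDLE
 *   IDLE --ixgbevf_qv_lock_poll()--> POLL --unlock--> IDLE
 *   NAPI + busy-poll attempt -> NAPI | POLL_YIELD  (poller backs off)
 *   POLL + napi attempt      -> POLL | NAPI_YIELD  (napi backs off)
 *   any  --ixgbevf_qv_disable()--> DISABLED  (preserved across unlocks)
 */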
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+ spin_lock_init(&q_vector->lock);
+ q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_LOCKED) {
+ WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+ q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->tx.ring->stats.yields++;
+#endif
+ } else {
+ /* we don't care if someone yielded */
+ q_vector->state = IXGBEVF_QV_STATE_NAPI;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+ IXGBEVF_QV_STATE_NAPI_YIELD));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* called from ixgbevf_busy_poll_recv() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+ q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+ rc = false;
+#ifdef BP_EXTENDED_STATS
+ q_vector->rx.ring->stats.yields++;
+#endif
+ } else {
+ /* preserve yield marks */
+ q_vector->state |= IXGBEVF_QV_STATE_POLL;
+ }
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = false;
+ spin_lock_bh(&q_vector->lock);
+ WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+ if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+ rc = true;
+ /* reset state to idle, unless QV is disabled */
+ q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+ WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+ return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+ int rc = true;
+ spin_lock_bh(&q_vector->lock);
+ if (q_vector->state & IXGBEVF_QV_OWNED)
+ rc = false;
+ q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
+ spin_unlock_bh(&q_vector->lock);
+ return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/*
* microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -165,9 +307,18 @@ struct ixgbevf_q_vector {
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-#define IXGBE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
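A worked example of the unused-descriptor arithmetic (ring sizes assumed for illustration):

/* Sketch: count = 512, next_to_clean = 10, next_to_use = 500
 *
 *   (10 > 500 ? 0 : 512) + 10 - 500 - 1 == 21 free descriptors
 *
 * The trailing -1 keeps one slot permanently unused, so
 * next_to_clean == next_to_use can only ever mean "ring empty".
 */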
+
+static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
+{
+ writel(value, ring->tail);
+}
#define IXGBEVF_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
@@ -190,7 +341,6 @@ struct ixgbevf_q_vector {
struct ixgbevf_adapter {
struct timer_list watchdog_timer;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -203,25 +353,18 @@ struct ixgbevf_adapter {
u32 eims_other;
/* TX */
- struct ixgbevf_ring *tx_ring; /* One per active queue */
int num_tx_queues;
+ struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
/* RX */
- struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
+ struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- struct msix_entry *msix_entries;
-
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -231,6 +374,9 @@ struct ixgbevf_adapter {
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
+#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
+
+ struct msix_entry *msix_entries;
/* OS defined structs */
struct net_device *netdev;
@@ -239,28 +385,43 @@ struct ixgbevf_adapter {
/* structs defined in ixgbe_vf.h */
struct ixgbe_hw hw;
u16 msg_enable;
- struct ixgbevf_hw_stats stats;
- u64 zero_base;
+ u16 bd_number;
/* Interrupt Throttle Rate */
u32 eitr_param;
+ struct ixgbevf_hw_stats stats;
+
unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
+#ifdef BP_EXTENDED_STATS
+ u64 bp_rx_yields;
+ u64 bp_rx_cleaned;
+ u64 bp_rx_missed;
+
+ u64 bp_tx_yields;
+ u64 bp_tx_cleaned;
+ u64 bp_tx_missed;
+#endif
+
+ u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
- struct work_struct watchdog_task;
-
spinlock_t mbx_lock;
+
+ struct work_struct watchdog_task;
};
enum ixbgevf_state_t {
__IXGBEVF_TESTING,
__IXGBEVF_RESETTING,
- __IXGBEVF_DOWN
+ __IXGBEVF_DOWN,
+ __IXGBEVF_DISABLED,
+ __IXGBEVF_REMOVING,
+ __IXGBEVF_WORK_INIT,
};
struct ixgbevf_cb {
@@ -281,27 +442,25 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
- struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 59a62bbfb37..75467f83772 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.7.12-k"
+#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
@@ -95,13 +95,59 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
-static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbevf_ring *rx_ring,
+static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+
+ if (!hw->hw_addr)
+ return;
+ hw->hw_addr = NULL;
+ dev_err(&adapter->pdev->dev, "Adapter removed\n");
+ if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+ schedule_work(&adapter->watchdog_task);
+}
+
+static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+ u32 value;
+
+ /* The following check avoids a second read of the status
+ * register when the register just read was itself the status
+ * register and returned IXGBE_FAILED_READ_REG. It also blocks
+ * any potential recursion.
+ */
+ if (reg == IXGBE_VFSTATUS) {
+ ixgbevf_remove_adapter(hw);
+ return;
+ }
+ value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
+ if (value == IXGBE_FAILED_READ_REG)
+ ixgbevf_remove_adapter(hw);
+}
+
+u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (IXGBE_REMOVED(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbevf_check_remove(hw, reg);
+ return value;
+}
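The removal check leans on PCIe behavior: reads from a surprise-removed device complete with all ones, which is assumed here to be the IXGBE_FAILED_READ_REG pattern. A hedged caller-side sketch:

/* Sketch only; hw points at a live struct ixgbe_hw. */
u32 links = ixgbevf_read_reg(hw, IXGBE_VFLINKS);
if (links == IXGBE_FAILED_READ_REG)
	return;	/* ixgbevf_remove_adapter() has already cleared hw_addr */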
+
+static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
u32 val)
{
+ rx_ring->next_to_use = val;
+
/*
* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
@@ -109,7 +155,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* such as IA-64).
*/
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+ ixgbevf_write_tail(rx_ring, val);
}
/**
@@ -143,28 +189,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
- struct ixgbevf_tx_buffer
- *tx_buffer_info)
-{
- if (tx_buffer_info->dma) {
- if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
- DMA_TO_DEVICE);
- else
+ struct ixgbevf_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->skb) {
+ dev_kfree_skb_any(tx_buffer->skb);
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
- tx_buffer_info->dma,
- tx_buffer_info->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
- tx_buffer_info->dma = 0;
- }
- if (tx_buffer_info->skb) {
- dev_kfree_skb_any(tx_buffer_info->skb);
- tx_buffer_info->skb = NULL;
+ } else if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
- tx_buffer_info->time_stamp = 0;
- /* tx_buffer_info must be completely set up in the transmit path */
+ tx_buffer->next_to_watch = NULL;
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* tx_buffer must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR 14
@@ -185,20 +228,21 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
- union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, count = 0;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = tx_ring->count / 2;
+ unsigned int i = tx_ring->next_to_clean;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
- i = tx_ring->next_to_clean;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- eop_desc = tx_buffer_info->next_to_watch;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
do {
- bool cleaned = false;
+ union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
@@ -212,67 +256,90 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* clear next_to_watch to prevent false hangs */
- tx_buffer_info->next_to_watch = NULL;
+ tx_buffer->next_to_watch = NULL;
- for ( ; !cleaned; count++) {
- struct sk_buff *skb;
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- cleaned = (tx_desc == eop_desc);
- skb = tx_buffer_info->skb;
-
- if (cleaned && skb) {
- unsigned int segs, bytecount;
-
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_packets += segs;
- total_bytes += bytecount;
- }
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ dev_kfree_skb_any(tx_buffer->skb);
- ixgbevf_unmap_and_free_tx_resource(tx_ring,
- tx_buffer_info);
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
- tx_desc->wb.status = 0;
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
i++;
- if (i == tx_ring->count)
- i = 0;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
}
- eop_desc = tx_buffer_info->next_to_watch;
- } while (count < tx_ring->count);
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+ i += tx_ring->count;
tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
+
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
}
}
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
-
- return count < tx_ring->count;
+ return !!budget;
}
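A note on the accounting above (my reading of the hunk): the budget starts at half the ring, and the return value flips to false only when that budget is exhausted:

/* Sketch: budget = tx_ring->count / 2 completed packets per call;
 * return !!budget is true when cleaning finished before the budget
 * ran out, letting ixgbevf_poll() mark Tx work as complete.
 */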
/**
@@ -300,6 +367,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
}
/**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ skb_mark_napi_id(skb, &q_vector->napi);
+
+ if (ixgbevf_qv_busy_polling(q_vector)) {
+ netif_receive_skb(skb);
+ /* exit early if we busy polled */
+ return;
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+ ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
+/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
@@ -317,7 +408,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
@@ -325,51 +416,46 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- ring->hw_csum_rx_error++;
+ ring->rx_stats.csum_err++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- ring->hw_csum_rx_good++;
}
/**
* ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
**/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
int cleaned_count)
{
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
unsigned int i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
-
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
if (!bi->skb) {
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
- adapter->alloc_rx_buff_failed++;
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
goto no_buffers;
}
+
bi->skb = skb;
- bi->dma = dma_map_single(&pdev->dev, skb->data,
+ bi->dma = dma_map_single(rx_ring->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
dev_kfree_skb(skb);
bi->skb = NULL;
- dev_err(&pdev->dev, "RX DMA map failed\n");
+ dev_err(rx_ring->dev, "Rx DMA map failed\n");
break;
}
}
@@ -378,14 +464,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
- }
+ if (rx_ring->next_to_use != i)
+ ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
@@ -396,12 +480,10 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *rx_ring,
- int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int budget)
{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
@@ -427,7 +509,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -447,7 +529,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
skb->next = next_buffer->skb;
IXGBE_CB(skb->next)->prev = skb;
- adapter->non_eop_descs++;
+ rx_ring->rx_stats.non_eop_descs++;
goto next_desc;
}
@@ -473,36 +555,27 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
total_rx_bytes += skb->len;
total_rx_packets++;
- /*
- * Work around issue of some types of VM to VM loop back
- * packets not getting split correctly
- */
- if (staterr & IXGBE_RXD_STAT_LB) {
- u32 header_fixup_len = skb_headlen(skb);
- if (header_fixup_len < 14)
- skb_push(skb, header_fixup_len);
- }
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
/* Workaround hardware that can't do proper VEPA multicast
* source pruning.
*/
- if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- ether_addr_equal(adapter->netdev->dev_addr,
+ if ((skb->pkt_type == PACKET_BROADCAST ||
+ skb->pkt_type == PACKET_MULTICAST) &&
+ ether_addr_equal(rx_ring->netdev->dev_addr,
eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
- ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+ ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
next_desc:
rx_desc->wb.upper.status_error = 0;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
- ixgbevf_alloc_rx_buffers(adapter, rx_ring,
- cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
@@ -514,19 +587,19 @@ next_desc:
}
rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+ cleaned_count = ixgbevf_desc_unused(rx_ring);
if (cleaned_count)
- ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->total_packets += total_rx_packets;
- rx_ring->total_bytes += total_rx_bytes;
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- return !!budget;
+ return total_rx_packets;
}
/**
@@ -549,6 +622,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ if (!ixgbevf_qv_lock_napi(q_vector))
+ return budget;
+#endif
+
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
if (q_vector->rx.count > 1)
@@ -558,10 +636,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
- clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
- per_ring_budget);
+ clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget)
+ < per_ring_budget);
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -569,7 +652,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
ixgbevf_set_itr(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
1 << q_vector->v_idx);
@@ -580,7 +664,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
* ixgbevf_write_eitr - write VTEITR register in hardware specific way
* @q_vector: structure containing interrupt and ring information
*/
-static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbe_hw *hw = &adapter->hw;
@@ -596,6 +680,40 @@ static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *ring;
+ int found = 0;
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return LL_FLUSH_FAILED;
+
+ if (!ixgbevf_qv_lock_poll(q_vector))
+ return LL_FLUSH_BUSY;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx) {
+ found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+ if (found)
+ ring->stats.cleaned += found;
+ else
+ ring->stats.misses++;
+#endif
+ if (found)
+ break;
+ }
+
+ ixgbevf_qv_unlock_poll(q_vector);
+
+ return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -756,37 +874,13 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
- struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 msg;
- bool got_ack = false;
hw->mac.get_link_status = 1;
- if (!hw->mbx.ops.check_for_ack(hw))
- got_ack = true;
- if (!hw->mbx.ops.check_for_msg(hw)) {
- hw->mbx.ops.read(hw, &msg, 1);
-
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 1));
- adapter->link_up = false;
- }
-
- if (msg & IXGBE_VT_MSGTYPE_NACK)
- dev_info(&pdev->dev,
- "Last Request of type %2.2x to PF Nacked\n",
- msg & 0xFF);
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
- }
-
- /* checking for the ack clears the PFACK bit. Place
- * it back in the v2p_mailbox cache so that anyone
- * polling for an ack will not miss it
- */
- if (got_ack)
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -814,8 +908,8 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->rx_ring[r_idx].next = q_vector->rx.ring;
- q_vector->rx.ring = &a->rx_ring[r_idx];
+ a->rx_ring[r_idx]->next = q_vector->rx.ring;
+ q_vector->rx.ring = a->rx_ring[r_idx];
q_vector->rx.count++;
}
@@ -824,8 +918,8 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- a->tx_ring[t_idx].next = q_vector->tx.ring;
- q_vector->tx.ring = &a->tx_ring[t_idx];
+ a->tx_ring[t_idx]->next = q_vector->tx.ring;
+ q_vector->tx.ring = a->tx_ring[t_idx];
q_vector->tx.count++;
}
@@ -1053,6 +1147,70 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
}
/**
+ * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 tdba = ring->dma;
+ int wait_loop = 10;
+ u32 txdctl = IXGBE_TXDCTL_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
+ IXGBE_WRITE_FLUSH(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_tx_desc));
+
+ /* disable head writeback */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
+ (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN));
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
+ ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ /* In order to avoid issues, WTHRESH + PTHRESH should always be less
+ * than or equal to the number of on-chip descriptors, which is
+ * currently 40.
+ */
+ txdctl |= (8 << 16); /* WTHRESH = 8 */
+
+ /* Setting PTHRESH to 32 both improves performance
+ * and avoids a TX hang with DFP enabled
+ */
+ txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ do {
+ usleep_range(1000, 2000);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
+ } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ pr_err("Could not enable Tx Queue %d\n", reg_idx);
+}
+
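[Editor's note: the threshold comment maps directly onto the TXDCTL layout on 82599-family parts — PTHRESH in bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16, ENABLE at bit 25. A standalone sketch of the packing; the enable-bit value is taken from the ixgbe defines as an assumption:]

    #include <stdint.h>
    #include <stdio.h>

    #define TXDCTL_ENABLE 0x02000000u /* assumed value of IXGBE_TXDCTL_ENABLE */

    int main(void)
    {
        uint32_t txdctl = TXDCTL_ENABLE;

        txdctl |= 8u << 16; /* WTHRESH = 8: burst descriptor writeback */
        txdctl |= 1u << 8;  /* HTHRESH = 1 */
        txdctl |= 32u;      /* PTHRESH = 32 */

        /* WTHRESH + PTHRESH = 40, the on-chip descriptor limit noted above */
        printf("TXDCTL = 0x%08x\n", txdctl); /* prints 0x02080120 */
        return 0;
    }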
+/**
* ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
* @adapter: board private structure
*
@@ -1060,31 +1218,11 @@ static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
- u64 tdba;
- struct ixgbe_hw *hw = &adapter->hw;
- u32 i, j, tdlen, txctrl;
+ u32 i;
/* Setup the HW Tx Head and Tail descriptor pointers */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->tx_ring[i];
- j = ring->reg_idx;
- tdba = ring->dma;
- tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
- (tdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_VFTDH(j);
- adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
- /* Disable Tx Head Writeback RO bit, since this hoses
- * bookkeeping if things aren't delivered in order.
- */
- txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
- }
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
@@ -1095,7 +1233,7 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
srrctl = IXGBE_SRRCTL_DROP_EN;
@@ -1107,6 +1245,21 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
+static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+
+ if (adapter->num_rx_queues > 1)
+ psrtype |= 1 << 29;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+}
+
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -1138,7 +1291,97 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+ adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
+}
+
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ if (IXGBE_REMOVED(hw->hw_addr))
+ return;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+ /* write value back with RXDCTL.ENABLE bit cleared */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ do {
+ udelay(10);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
+ reg_idx);
+}
+
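[Editor's note: both the disable path above and the enable path below use the same bounded-poll idiom — write, then re-read until the ENABLE bit settles or the loop budget runs out. A userspace sketch of the idiom against a fake register (all names illustrative):]

    #include <stdint.h>
    #include <stdio.h>

    #define RXDCTL_ENABLE 0x02000000u

    static uint32_t fake_rxdctl = RXDCTL_ENABLE;  /* queue starts enabled */

    static uint32_t reg_read(void)        { return fake_rxdctl; }
    static void     reg_write(uint32_t v) { fake_rxdctl = v; }

    int main(void)
    {
        int wait_loop = 10;
        uint32_t rxdctl = reg_read();

        reg_write(rxdctl & ~RXDCTL_ENABLE); /* request disable */

        do {
            /* the driver sleeps ~10us per iteration here */
            rxdctl = reg_read();
        } while (--wait_loop && (rxdctl & RXDCTL_ENABLE));

        if (!wait_loop)
            fprintf(stderr, "queue did not disable while polling\n");
        else
            printf("queue disabled, %d polls remaining\n", wait_loop);
        return 0;
    }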
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ if (IXGBE_REMOVED(hw->hw_addr))
+ return;
+ do {
+ usleep_range(1000, 2000);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+ if (!wait_loop)
+ pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
+ reg_idx);
+}
+
+static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+ u8 reg_idx = ring->reg_idx;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+ ixgbevf_disable_rx_queue(adapter, ring);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
+ ring->count * sizeof(union ixgbe_adv_rx_desc));
+
+ /* enable relaxed ordering */
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+
+ /* reset head and tail pointers */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
+ ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
+
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+
+ ixgbevf_configure_srrctl(adapter, reg_idx);
+
+ /* prevent DMA from exceeding buffer space available */
+ rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
+ rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+ ixgbevf_rx_desc_queue_enable(adapter, ring);
+ ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
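[Editor's note: ixgbevf_alloc_rx_buffers() is handed ixgbevf_desc_unused(ring), which for a freshly reset ring (ntc == ntu == 0) is count - 1 — one slot is always left empty so head and tail can be told apart. A worked check of that arithmetic; the formula is assumed to match the driver helper:]

    #include <stdio.h>

    /* assumed shape of ixgbevf_desc_unused() */
    static unsigned desc_unused(unsigned count, unsigned ntc, unsigned ntu)
    {
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    int main(void)
    {
        /* freshly reset ring: 512 - 1 = 511 buffers to fill */
        printf("%u\n", desc_unused(512, 0, 0));
        /* ring with 10 descriptors in flight: 501 free */
        printf("%u\n", desc_unused(512, 0, 10));
        return 0;
    }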
/**
@@ -1149,34 +1392,17 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
**/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
- u64 rdba;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j;
- u32 rdlen;
+ int i;
- /* PSRTYPE must be initialized in 82599 */
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ ixgbevf_setup_psrtype(adapter);
/* set_rx_buffer_len must be called before ring initialization */
ixgbevf_set_rx_buffer_len(adapter);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rdba = adapter->rx_ring[i].dma;
- j = adapter->rx_ring[i].reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
- (rdba & DMA_BIT_MASK(32)));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
- IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
- adapter->rx_ring[i].head = IXGBE_VFRDH(j);
- adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
-
- ixgbevf_configure_srrctl(adapter, j);
- }
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -1293,6 +1519,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
napi_enable(&q_vector->napi);
}
}
@@ -1306,48 +1535,63 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+ pr_info("QV %d locked\n", q_idx);
+ usleep_range(1000, 20000);
+ }
+#endif /* CONFIG_NET_RX_BUSY_POLL */
}
}
-static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err;
- ixgbevf_set_rx_mode(netdev);
+ spin_lock_bh(&adapter->mbx_lock);
- ixgbevf_restore_vlan(adapter);
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
- ixgbevf_configure_tx(adapter);
- ixgbevf_configure_rx(adapter);
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring,
- IXGBE_DESC_UNUSED(ring));
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0]->reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* if we have a bad config abort request queue reset */
+ if (adapter->num_rx_queues != num_rx_queues) {
+ /* force mailbox timeout to prevent further messages */
+ hw->mbx.timeout = 0;
+
+ /* wait for watchdog to come around and bail us out */
+ adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
}
+
+ return 0;
}
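[Editor's note: the bad-config branch does not fix things inline; it poisons the mailbox timeout and raises a flag for the watchdog to service later. A compressed model of that request/serve split — the flag name is reused from the patch, everything else is illustrative:]

    #include <stdio.h>

    #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED 0x1

    static unsigned int flags;

    static void configure_dcb(int have_queues, int need_queues)
    {
        if (have_queues != need_queues)
            flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; /* defer */
    }

    static void watchdog_task(void)
    {
        if (!(flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
            return;
        flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
        /* the real subtask closes the netdev, rebuilds the interrupt
         * scheme, then reopens it -- see ixgbevf_queue_reset_subtask() */
        printf("queues rebuilt\n");
    }

    int main(void)
    {
        configure_dcb(1, 4);
        watchdog_task();
        return 0;
    }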
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
- int rxr)
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int j = adapter->rx_ring[rxr].reg_idx;
- int k;
+ ixgbevf_configure_dcb(adapter);
- for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
- if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
- break;
- else
- msleep(1);
- }
- if (k >= IXGBE_MAX_RX_DESC_POLL) {
- hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
- "not set within the polling period\n", rxr);
- }
+ ixgbevf_set_rx_mode(adapter->netdev);
+
+ ixgbevf_restore_vlan(adapter);
- ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
- adapter->rx_ring[rxr].count - 1);
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1412,37 +1656,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- int i, j = 0;
- int num_rx_rings = adapter->num_rx_queues;
- u32 txdctl, rxdctl;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- /* enable WTHRESH=8 descriptors, to encourage burst writeback */
- txdctl |= (8 << 16);
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
- }
-
- for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
- rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
- rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
- if (hw->mac.type == ixgbe_mac_X540_vf) {
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
- IXGBE_RXDCTL_RLPML_EN);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
- ixgbevf_rx_desc_queue_enable(adapter, i);
- }
ixgbevf_configure_msix(adapter);
@@ -1455,6 +1668,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1468,87 +1682,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
-static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
-
- /* allocate resources on the ring */
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
- while (i) {
- i--;
- ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
- }
- kfree(rx_ring);
- return err;
- }
- }
-
- /* free the existing rings and queues */
- ixgbevf_free_all_rx_resources(adapter);
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- /* reset ring to vector mapping */
- ixgbevf_reset_q_vectors(adapter);
- ixgbevf_map_rings_to_vectors(adapter);
-
- return 0;
-}
-
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- ixgbevf_negotiate_api(adapter);
-
- ixgbevf_reset_queues(adapter);
-
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1561,13 +1698,10 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
unsigned long size;
unsigned int i;
@@ -1580,7 +1714,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
rx_buffer_info = &rx_ring->rx_buffer_info[i];
if (rx_buffer_info->dma) {
- dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+ dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
@@ -1601,23 +1735,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
* ixgbevf_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned long size;
@@ -1636,14 +1760,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
memset(tx_ring->tx_buffer_info, 0, size);
memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
@@ -1655,7 +1771,7 @@ static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -1667,19 +1783,22 @@ static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 txdctl;
- int i, j;
+ int i;
/* signal that we are down to the interrupt handler */
- set_bit(__IXGBEVF_DOWN, &adapter->state);
- /* disable receives */
+ if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
+ return; /* do nothing if already down */
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
netif_tx_disable(netdev);
@@ -1700,10 +1819,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
- IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
+ IXGBE_TXDCTL_SWFLSH);
}
netif_carrier_off(netdev);
@@ -1733,10 +1852,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- if (hw->mac.ops.reset_hw(hw))
+ if (hw->mac.ops.reset_hw(hw)) {
hw_dbg(hw, "PF still resetting\n");
- else
+ } else {
hw->mac.ops.init_hw(hw);
+ ixgbevf_negotiate_api(adapter);
+ }
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -1749,7 +1870,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
int vectors)
{
- int err = 0;
int vector_threshold;
/* We'll want at least 2 (vector_threshold):
@@ -1763,33 +1883,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err || err < 0) /* Success or a nasty failure. */
- break;
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold)
- err = -ENOMEM;
-
- if (err) {
+ if (vectors < 0) {
dev_err(&adapter->pdev->dev,
"Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- } else {
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return vectors;
}
- return err;
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+
+ return 0;
}
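[Editor's note: pci_enable_msix_range() folds the old try-again loop into the PCI core — it either allocates somewhere in [minvec, maxvec] and returns the count, or returns a negative errno. A userspace mock of that contract and of the caller logic above:]

    #include <errno.h>
    #include <stdio.h>

    /* mock of pci_enable_msix_range(): grants what the "hardware" has,
     * as long as that meets the caller's minimum */
    static int mock_enable_msix_range(int available, int minvec, int maxvec)
    {
        int granted = (available < maxvec) ? available : maxvec;

        if (granted < minvec)
            return -ENOSPC;
        return granted;
    }

    int main(void)
    {
        int vector_threshold = 2;  /* "other" causes + at least one queue */
        int wanted = 4, have = 3;

        int vectors = mock_enable_msix_range(have, vector_threshold, wanted);
        if (vectors < 0) {
            fprintf(stderr, "Unable to allocate MSI-X interrupts\n");
            return 1;
        }
        printf("num_msix_vectors = %d\n", vectors); /* 3 */
        return 0;
    }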
/**
@@ -1805,9 +1916,28 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ int err;
+
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return;
+
+ /* we need as many queues as traffic classes */
+ if (num_tcs > 1)
+ adapter->num_rx_queues = num_tcs;
}
/**
@@ -1820,40 +1950,50 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
- int i;
+ struct ixgbevf_ring *ring;
+ int rx = 0, tx = 0;
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
+ for (; tx < adapter->num_tx_queues; tx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = tx;
+ ring->reg_idx = tx;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
- /* reg_idx may be remapped later by DCB config */
- adapter->tx_ring[i].reg_idx = i;
- adapter->tx_ring[i].dev = &adapter->pdev->dev;
- adapter->tx_ring[i].netdev = adapter->netdev;
+ adapter->tx_ring[tx] = ring;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
- adapter->rx_ring[i].reg_idx = i;
- adapter->rx_ring[i].dev = &adapter->pdev->dev;
- adapter->rx_ring[i].netdev = adapter->netdev;
+ for (; rx < adapter->num_rx_queues; rx++) {
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ goto err_allocation;
+
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rx;
+ ring->reg_idx = rx;
+
+ adapter->rx_ring[rx] = ring;
}
return 0;
-err_rx_ring_allocation:
- kfree(adapter->tx_ring);
-err_tx_ring_allocation:
+err_allocation:
+ while (tx) {
+ kfree(adapter->tx_ring[--tx]);
+ adapter->tx_ring[tx] = NULL;
+ }
+
+ while (rx) {
+ kfree(adapter->rx_ring[--rx]);
+ adapter->rx_ring[rx] = NULL;
+ }
return -ENOMEM;
}
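[Editor's note: the new per-ring kzalloc()s pair with a single unwind path — on failure, everything allocated so far is freed in reverse and the slots are NULLed so later teardown is safe. A standalone sketch of the same pattern with libc calloc():]

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_TX 2
    #define NUM_RX 2

    struct ring { int count; };

    static struct ring *tx_ring[NUM_TX], *rx_ring[NUM_RX];

    static int alloc_queues(void)
    {
        int tx = 0, rx = 0;

        for (; tx < NUM_TX; tx++)
            if (!(tx_ring[tx] = calloc(1, sizeof(struct ring))))
                goto err;
        for (; rx < NUM_RX; rx++)
            if (!(rx_ring[rx] = calloc(1, sizeof(struct ring))))
                goto err;
        return 0;
    err:
        /* unwind in reverse; NULL the slots so teardown stays safe */
        while (tx) { free(tx_ring[--tx]); tx_ring[tx] = NULL; }
        while (rx) { free(rx_ring[--rx]); rx_ring[rx] = NULL; }
        return -1;
    }

    int main(void)
    {
        printf("alloc_queues() = %d\n", alloc_queues());
        return 0;
    }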
@@ -1929,6 +2069,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_add(&q_vector->napi);
+#endif
adapter->q_vector[q_idx] = q_vector;
}
@@ -1938,6 +2081,9 @@ err_out:
while (q_idx) {
q_idx--;
q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[q_idx] = NULL;
@@ -1961,6 +2107,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ napi_hash_del(&q_vector->napi);
+#endif
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
@@ -2035,6 +2184,17 @@ err_set_interrupt:
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
+
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -2072,6 +2232,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->mac.max_tx_queues = 2;
hw->mac.max_rx_queues = 2;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -2082,6 +2245,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
+ ixgbevf_negotiate_api(adapter);
err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
if (err)
dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2261,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
}
- /* lock to protect mailbox accesses */
- spin_lock_init(&adapter->mbx_lock);
-
/* Enable dynamic interrupt throttling rates */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -2164,11 +2325,8 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->hw_csum_rx_error +=
- adapter->rx_ring[i].hw_csum_rx_error;
- adapter->hw_csum_rx_good +=
- adapter->rx_ring[i].hw_csum_rx_good;
- adapter->rx_ring[i].hw_csum_rx_error = 0;
- adapter->rx_ring[i].hw_csum_rx_good = 0;
+ adapter->rx_ring[i]->hw_csum_rx_error;
+ adapter->rx_ring[i]->hw_csum_rx_error = 0;
}
}
@@ -2223,6 +2381,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
@@ -2246,6 +2405,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
bool link_up = adapter->link_up;
s32 need_reset;
+ if (IXGBE_REMOVED(hw->hw_addr)) {
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ rtnl_lock();
+ ixgbevf_down(adapter);
+ rtnl_unlock();
+ }
+ return;
+ }
+ ixgbevf_queue_reset_subtask(adapter);
+
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
/*
@@ -2305,7 +2474,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
pf_has_reset:
/* Reset the timer */
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
mod_timer(&adapter->watchdog_timer,
round_jiffies(jiffies + (2 * HZ)));
@@ -2314,22 +2484,22 @@ pf_has_reset:
/**
* ixgbevf_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_tx_ring(adapter, tx_ring);
+ ixgbevf_clean_tx_ring(tx_ring);
vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
tx_ring->dma);
tx_ring->desc = NULL;
@@ -2346,23 +2516,18 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbevf_free_tx_resources(adapter,
- &adapter->tx_ring[i]);
-
+ if (adapter->tx_ring[i]->desc)
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
* ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
* @tx_ring: tx descriptor ring (for a specific queue) to setup
*
* Return 0 on success, negative on failure
**/
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring)
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -2374,13 +2539,11 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+ tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
return 0;
err:
@@ -2406,7 +2569,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2419,40 +2582,34 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
* @rx_ring: rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
int size;
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
- goto alloc_failed;
+ goto err;
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
- if (!rx_ring->desc) {
- vfree(rx_ring->rx_buffer_info);
- rx_ring->rx_buffer_info = NULL;
- goto alloc_failed;
- }
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
+ if (!rx_ring->desc)
+ goto err;
return 0;
-alloc_failed:
+err:
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
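[Editor's note: collapsing alloc_failed into one err label works because vfree(NULL) is a no-op, so the buffer-info pointer can be freed unconditionally whichever allocation failed. A sketch of the idiom with libc free(), which has the same NULL-safety:]

    #include <stdio.h>
    #include <stdlib.h>

    struct rx_ring { void *buffer_info; void *desc; };

    static int setup_rx_resources(struct rx_ring *ring)
    {
        ring->buffer_info = calloc(1, 64);
        if (!ring->buffer_info)
            goto err;

        ring->desc = calloc(1, 4096);
        if (!ring->desc)
            goto err;

        return 0;
    err:
        free(ring->buffer_info);   /* free(NULL) is harmless */
        ring->buffer_info = NULL;
        fprintf(stderr, "Unable to allocate memory for the Rx descriptor ring\n");
        return -1;
    }

    int main(void)
    {
        struct rx_ring ring = { 0 };
        return setup_rx_resources(&ring) ? 1 : 0;
    }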
@@ -2471,7 +2628,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
hw_dbg(&adapter->hw,
@@ -2483,22 +2640,18 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *rx_ring)
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgbevf_clean_rx_ring(adapter, rx_ring);
+ ixgbevf_clean_rx_ring(rx_ring);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
@@ -2515,66 +2668,8 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbevf_free_rx_resources(adapter,
- &adapter->rx_ring[i]);
-}
-
-static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- unsigned int def_q = 0;
- unsigned int num_tcs = 0;
- unsigned int num_rx_queues = 1;
- int err, i;
-
- spin_lock_bh(&adapter->mbx_lock);
-
- /* fetch queue configuration from the PF */
- err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
-
- spin_unlock_bh(&adapter->mbx_lock);
-
- if (err)
- return err;
-
- if (num_tcs > 1) {
- /* update default Tx ring register index */
- adapter->tx_ring[0].reg_idx = def_q;
-
- /* we need as many queues as traffic classes */
- num_rx_queues = num_tcs;
- }
-
- /* nothing to do if we have the correct number of queues */
- if (adapter->num_rx_queues == num_rx_queues)
- return 0;
-
- /* allocate new rings */
- rx_ring = kcalloc(num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring)
- return -ENOMEM;
-
- /* setup ring fields */
- for (i = 0; i < num_rx_queues; i++) {
- rx_ring[i].count = adapter->rx_ring_count;
- rx_ring[i].queue_index = i;
- rx_ring[i].reg_idx = i;
- rx_ring[i].dev = &adapter->pdev->dev;
- rx_ring[i].netdev = adapter->netdev;
- }
-
- /* free the existing ring and queues */
- adapter->num_rx_queues = 0;
- kfree(adapter->rx_ring);
-
- /* move new rings into position on the adapter struct */
- adapter->rx_ring = rx_ring;
- adapter->num_rx_queues = num_rx_queues;
-
- return 0;
+ if (adapter->rx_ring[i]->desc)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -2620,13 +2715,6 @@ static int ixgbevf_open(struct net_device *netdev)
}
}
- ixgbevf_negotiate_api(adapter);
-
- /* setup queue reg_idx and Rx queue count */
- err = ixgbevf_setup_queues(adapter);
- if (err)
- goto err_setup_queues;
-
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2664,7 +2752,6 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
-err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2696,6 +2783,34 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
+static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+
+ if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ return;
+
+ adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbevf_close(dev);
+
+ ixgbevf_clear_interrupt_scheme(adapter);
+ ixgbevf_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ ixgbevf_open(dev);
+}
+
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
@@ -2718,19 +2833,23 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+ struct ixgbevf_tx_buffer *first,
+ u8 *hdr_len)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ int err;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
if (!skb_is_gso(skb))
return 0;
- if (skb_header_cloned(skb)) {
- int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
@@ -2744,12 +2863,17 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
IPPROTO_TCP,
0);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM |
+ IXGBE_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO |
+ IXGBE_TX_FLAGS_CSUM;
}
/* compute header lengths */
@@ -2757,6 +2881,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
*hdr_len += l4len;
*hdr_len = skb_transport_offset(skb) + l4len;
+ /* update gso size and bytecount with header size */
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
+ first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
/* mss_l4len_id: use 1 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
@@ -2765,7 +2893,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
@@ -2773,9 +2901,10 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first)
{
+ struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2783,12 +2912,12 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
l4_hdr = ip_hdr(skb)->protocol;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
break;
@@ -2796,7 +2925,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum but proto=%x!\n",
- skb->protocol);
+ first->protocol);
}
break;
}
@@ -2824,184 +2953,190 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
break;
}
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
type_tucmd, mss_l4len_idx);
-
- return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
- struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int len;
- unsigned int total = skb->len;
- unsigned int offset = 0, size;
- int count = 0;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
- int i;
+ /* set type for advanced descriptor with frame checksum insertion */
+ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_IFCS |
+ IXGBE_ADVTXD_DCMD_DEXT);
+
+ /* set HW vlan bit if vlan is present */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
- i = tx_ring->next_to_use;
+ /* set segmentation enable bits for TSO/FSO */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
- len = min(skb_headlen(skb), total);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
- tx_buffer_info->length = size;
- tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(tx_ring->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
- goto dma_error;
+ return cmd_type;
+}
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
+{
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
- for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ /* enable L4 checksum for TSO and TX checksum offload */
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
- frag = &skb_shinfo(skb)->frags[f];
- len = min((unsigned int)skb_frag_size(frag), total);
- offset = 0;
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- while (len) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+ /* use index 1 context for TSO/FSO/FCOE */
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
- tx_buffer_info->length = size;
- tx_buffer_info->dma =
- skb_frag_dma_map(tx_ring->dev, frag,
- offset, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev,
- tx_buffer_info->dma))
- goto dma_error;
- tx_buffer_info->mapped_as_page = true;
+ /* Check Context must be set if Tx switch is enabled, which it
+ * always is for the case where virtual functions are running
+ */
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
- len -= size;
- total -= size;
- offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- if (total == 0)
- break;
- }
+ tx_desc->read.olinfo_status = olinfo_status;
+}
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i = i - 1;
- tx_ring->tx_buffer_info[i].skb = skb;
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+ struct ixgbevf_tx_buffer *first,
+ const u8 hdr_len)
+{
+ dma_addr_t dma;
+ struct sk_buff *skb = first->skb;
+ struct ixgbevf_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ unsigned int paylen = skb->len - hdr_len;
+ u32 tx_flags = first->tx_flags;
+ __le32 cmd_type;
+ u16 i = tx_ring->next_to_use;
- return count;
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-dma_error:
- dev_err(tx_ring->dev, "TX DMA map failed\n");
+ ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+ cmd_type = ixgbevf_tx_cmd_type(tx_flags);
- /* clear timestamp and dma mappings for failed tx_buffer_info map */
- tx_buffer_info->dma = 0;
- count--;
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- /* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
- i--;
- if (i < 0)
- i += tx_ring->count;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- }
+ /* record length, and DMA address */
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
- return count;
-}
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, unsigned int first, u32 paylen,
- u8 hdr_len)
-{
- union ixgbe_adv_tx_desc *tx_desc = NULL;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 olinfo_status = 0, cmd_type_len = 0;
- unsigned int i;
+ for (;;) {
+ while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+ dma += IXGBE_MAX_DATA_PER_TXD;
+ size -= IXGBE_MAX_DATA_PER_TXD;
- cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
+ }
- cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+ if (likely(!data_len))
+ break;
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+ size = skb_frag_size(frag);
+ data_len -= size;
- /* use index 1 context for tso */
- olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
- }
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
- /*
- * Check Context must be set if Tx switch is enabled, which it
- * always is for case where virtual functions are running
- */
- olinfo_status |= IXGBE_ADVTXD_CC;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
- olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_desc->read.olinfo_status = 0;
- i = tx_ring->next_to_use;
- while (count--) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
- tx_desc->read.cmd_type_len =
- cpu_to_le32(cmd_type_len | tx_buffer_info->length);
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- i++;
- if (i == tx_ring->count)
- i = 0;
+ frag++;
}
- tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+ tx_desc->read.cmd_type_len = cmd_type;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+ /* set the timestamp */
+ first->time_stamp = jiffies;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier (wmb) to make certain all of the
+ * status bits have been updated before next_to_watch is written.
*/
wmb();
- tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ /* notify HW of packet */
+ ixgbevf_write_tail(tx_ring, i);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
tx_ring->next_to_use = i;
}
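[Editor's note: the wmb() before next_to_watch matters because the cleanup path keys off next_to_watch — every descriptor write must be visible before the pointer that advertises them. A userspace analogue of that publish step, using a C11 release store in place of the kernel barrier:]

    #include <stdatomic.h>
    #include <stdio.h>

    struct desc { unsigned long addr; unsigned int cmd; };

    static struct desc ring[8];
    static _Atomic(struct desc *) next_to_watch;

    int main(void)
    {
        ring[0].addr = 0xdeadbeefUL;
        ring[0].cmd  = 0x1;        /* stands in for EOP | RS */

        /* publish: all writes above become visible before the pointer */
        atomic_store_explicit(&next_to_watch, &ring[0], memory_order_release);

        struct desc *d = atomic_load_explicit(&next_to_watch,
                                              memory_order_acquire);
        if (d)
            printf("cleanup sees cmd=0x%x\n", d->cmd);
        return 0;
    }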
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -3010,18 +3145,19 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
/* We need to check again in a case another CPU has just
* made room available. */
- if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ if (likely(ixgbevf_desc_unused(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
+ ++tx_ring->tx_stats.restart_queue;
+
return 0;
}
static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ if (likely(ixgbevf_desc_unused(tx_ring) >= size))
return 0;
return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
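[Editor's note: __ixgbevf_maybe_stop_tx() stops the subqueue first and only then re-checks the free-descriptor count, so a cleanup that races past the first check cannot strand a stopped queue. Modeled with a plain counter — illustrative only, no real netif calls:]

    #include <stdbool.h>
    #include <stdio.h>

    static int unused = 1;      /* descriptors currently free */
    static bool stopped;

    static int maybe_stop_tx(int size)
    {
        if (unused >= size)
            return 0;

        stopped = true;          /* stop first... */
        /* a memory barrier sits here in the driver */
        if (unused < size)       /* ...then re-check */
            return -1;           /* -EBUSY: really out of room */

        stopped = false;         /* a reprieve: cleanup freed space */
        return 0;
    }

    int main(void)
    {
        printf("maybe_stop_tx(4) = %d, stopped=%d\n",
               maybe_stop_tx(4), stopped);
        return 0;
    }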
@@ -3029,22 +3165,23 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_tx_buffer *first;
struct ixgbevf_ring *tx_ring;
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
- int r_idx = 0, tso;
+ int tso;
+ u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 hdr_len = 0;
u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+
if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- tx_ring = &adapter->tx_ring[r_idx];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
/*
* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
@@ -3060,38 +3197,41 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
count += skb_shinfo(skb)->nr_frags;
#endif
if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
- adapter->tx_busy++;
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- first = tx_ring->next_to_use;
+ /* record initial flags and protocol */
+ first->tx_flags = tx_flags;
+ first->protocol = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP))
- tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ tso = ixgbevf_tso(tx_ring, first, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (!tso)
+ ixgbevf_tx_csum(tx_ring, first);
- if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
- else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
- tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ ixgbevf_tx_map(tx_ring, first, hdr_len);
- ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags),
- first, skb->len, hdr_len);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
- writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
+ return NETDEV_TX_OK;
- ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+out_drop:
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
return NETDEV_TX_OK;
}
@@ -3189,7 +3329,8 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
return retval;
#endif
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return 0;
}
@@ -3197,11 +3338,10 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
- struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
u32 err;
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
* pci_restore_state clears dev->state_saved so call
@@ -3214,8 +3354,12 @@ static int ixgbevf_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
+ smp_mb__before_atomic();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
+ ixgbevf_reset(adapter);
+
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
rtnl_unlock();
@@ -3224,8 +3368,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
return err;
}
- ixgbevf_reset(adapter);
-
if (netif_running(netdev)) {
err = ixgbevf_open(netdev);
if (err)
@@ -3257,23 +3399,23 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = &adapter->rx_ring[i];
+ ring = adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = &adapter->tx_ring[i];
+ ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_bh(&ring->syncp);
- bytes = ring->total_bytes;
- packets = ring->total_packets;
- } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ bytes = ring->stats.bytes;
+ packets = ring->stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
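[Editor's note: the _bh to _irq rename aside, the fetch loop is a classic seqcount read — sample the sequence, copy the stats, retry if a writer bumped the sequence meanwhile. A minimal single-threaded illustration of the reader protocol (stand-in names, not the u64_stats API):]

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned seq;
    static unsigned long long bytes, packets;

    static unsigned fetch_begin(void) { return atomic_load(&seq); }

    static int fetch_retry(unsigned start)
    {
        /* odd or changed sequence means a writer was active */
        return (start & 1) || atomic_load(&seq) != start;
    }

    int main(void)
    {
        unsigned long long b, p;
        unsigned start;

        do {
            start = fetch_begin();
            b = bytes;
            p = packets;
        } while (fetch_retry(start));

        printf("bytes=%llu packets=%llu\n", b, p);
        return 0;
    }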
@@ -3293,6 +3435,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = ixgbevf_busy_poll_recv,
+#endif
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3326,19 +3471,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
- goto err_dma;
- }
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
}
pci_using_dac = 0;
}
@@ -3377,6 +3517,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ adapter->io_addr = hw->hw_addr;
if (!hw->hw_addr) {
err = -EIO;
goto err_ioremap;
@@ -3432,8 +3573,13 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->watchdog_timer.function = ixgbevf_watchdog;
adapter->watchdog_timer.data = (unsigned long)adapter;
+ if (IXGBE_REMOVED(hw->hw_addr)) {
+ err = -EIO;
+ goto err_sw_init;
+ }
INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+ set_bit(__IXGBEVF_WORK_INIT, &adapter->state);
err = ixgbevf_init_interrupt_scheme(adapter);
if (err)
@@ -3462,14 +3608,15 @@ err_register:
ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
ixgbevf_reset_interrupt_capability(adapter);
- iounmap(hw->hw_addr);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
return err;
}
@@ -3487,7 +3634,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- set_bit(__IXGBEVF_DOWN, &adapter->state);
+ set_bit(__IXGBEVF_REMOVING, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
@@ -3500,17 +3647,15 @@ static void ixgbevf_remove(struct pci_dev *pdev)
ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);
- iounmap(adapter->hw.hw_addr);
+ iounmap(adapter->io_addr);
pci_release_regions(pdev);
hw_dbg(&adapter->hw, "Remove complete\n");
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
free_netdev(netdev);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
}
/**
@@ -3527,15 +3672,23 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
netif_device_detach(netdev);
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
+ }
if (netif_running(netdev))
ixgbevf_down(adapter);
- pci_disable_device(pdev);
+ if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
+ pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -3559,6 +3712,8 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+ smp_mb__before_atomic();
+ clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
ixgbevf_reset(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index debd8c0e1f2..09dd8f698be 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -70,16 +70,6 @@
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 387b52635bc..4d44d64ae38 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
- memcpy(msg_addr, addr, 6);
+ memcpy(msg_addr, addr, ETH_ALEN);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val)
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 7b1f502d171..3061d189047 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -172,6 +172,37 @@ struct ixgbevf_info {
const struct ixgbe_mac_operations *mac_ops;
};
+#define IXGBE_FAILED_READ_REG 0xffffffffU
+
+#define IXGBE_REMOVED(a) unlikely(!(a))
+
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+ if (IXGBE_REMOVED(reg_addr))
+ return;
+ writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
+
+u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r)
+
+static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
+ u32 offset, u32 value)
+{
+ ixgbe_write_reg(hw, reg + (offset << 2), value);
+}
+#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
+
+static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
+ u32 offset)
+{
+ return ixgbevf_read_reg(hw, reg + (offset << 2));
+}
+#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
+
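[Editor's note: turning the register macros into functions is what enables surprise-removal handling — the write side bails when hw_addr has been cleared, and the (not shown) ixgbevf_read_reg presumably treats an all-ones read as a dead device, per IXGBE_FAILED_READ_REG. A userspace sketch of that convention, under those assumptions:]

    #include <stdint.h>
    #include <stdio.h>

    #define FAILED_READ_REG 0xffffffffU

    static uint32_t *hw_addr;     /* NULL once the device is gone */
    static uint32_t fake_bar[16];

    static uint32_t read_reg(unsigned reg)
    {
        uint32_t *addr = hw_addr;  /* snapshot, as ACCESS_ONCE() does */
        uint32_t value;

        if (!addr)
            return FAILED_READ_REG;
        value = addr[reg];
        if (value == FAILED_READ_REG)
            hw_addr = NULL;        /* remember the device vanished */
        return value;
    }

    int main(void)
    {
        hw_addr = fake_bar;
        fake_bar[0] = 0x1234;
        printf("reg0=0x%x\n", read_reg(0));
        fake_bar[1] = FAILED_READ_REG;   /* PCIe reads return all ones */
        printf("reg1=0x%x removed=%d\n", read_reg(1), hw_addr == NULL);
        return 0;
    }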
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,