diff options
Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
31 files changed, 8739 insertions, 3459 deletions
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 479b2c4e552..d9eb80acac4 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,7 +1,7 @@  ################################################################################  #  # Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 Intel Corporation. +# Copyright(c) 2013 - 2014 Intel Corporation.  #  # This program is free software; you can redistribute it and/or modify it  # under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@  # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for  # more details.  # -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# You should have received a copy of the GNU General Public License along +# with this program.  If not, see <http://www.gnu.org/licenses/>.  #  # The full GNU General Public License is included in this distribution in  # the file called "COPYING". @@ -41,4 +40,7 @@ i40e-objs := i40e_main.o \  	i40e_debugfs.o	\  	i40e_diag.o	\  	i40e_txrx.o	\ +	i40e_ptp.o	\  	i40e_virtchnl_pf.o + +i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b5252eb8a6c..65985846345 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   
*   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -29,7 +28,7 @@  #define _I40E_H_  #include <net/tcp.h> -#include <linux/init.h> +#include <net/udp.h>  #include <linux/types.h>  #include <linux/errno.h>  #include <linux/module.h> @@ -46,16 +45,19 @@  #include <linux/sctp.h>  #include <linux/pkt_sched.h>  #include <linux/ipv6.h> -#include <linux/version.h>  #include <net/checksum.h>  #include <net/ip6_checksum.h>  #include <linux/ethtool.h>  #include <linux/if_vlan.h> +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h>  #include "i40e_type.h"  #include "i40e_prototype.h"  #include "i40e_virtchnl.h"  #include "i40e_virtchnl_pf.h"  #include "i40e_txrx.h" +#include "i40e_dcb.h"  /* Useful i40e defaults */  #define I40E_BASE_PF_SEID     16 @@ -64,15 +66,17 @@  #define I40E_MAX_VEB          16  #define I40E_MAX_NUM_DESCRIPTORS      4096 -#define I40E_MAX_REGISTER     0x0038FFFF +#define I40E_MAX_REGISTER     0x800000  #define I40E_DEFAULT_NUM_DESCRIPTORS  512  #define I40E_REQ_DESCRIPTOR_MULTIPLE  32  #define I40E_MIN_NUM_DESCRIPTORS      64  #define I40E_MIN_MSIX                 2  #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */ +#define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */  #define 
I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */  #define I40E_DEFAULT_QUEUES_PER_VF    4  #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */ +#define I40E_MAX_QUEUES_PER_TC        64 /* should be a power of 2 */  #define I40E_FDIR_RING                0  #define I40E_FDIR_RING_COUNT          32  #define I40E_MAX_AQ_BUF_SIZE          4096 @@ -82,20 +86,18 @@  #define I40E_DEFAULT_MSG_ENABLE       4  #define I40E_NVM_VERSION_LO_SHIFT  0 -#define I40E_NVM_VERSION_LO_MASK   (0xf << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_MID_SHIFT 4 -#define I40E_NVM_VERSION_MID_MASK  (0xff << I40E_NVM_VERSION_MID_SHIFT) +#define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)  #define I40E_NVM_VERSION_HI_SHIFT  12  #define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT) +/* The values in here are decimal coded as hex as is the case in the NVM map*/ +#define I40E_CURRENT_NVM_VERSION_HI 0x2 +#define I40E_CURRENT_NVM_VERSION_LO 0x40 +  /* magic for getting defines into strings */  #define STRINGIFY(foo)  #foo  #define XSTRINGIFY(bar) STRINGIFY(bar) -#ifndef ARCH_HAS_PREFETCH -#define prefetch(X) -#endif -  #define I40E_RX_DESC(R, i)			\  	((ring_is_16byte_desc_enabled(R))	\  		? 
(union i40e_32byte_rx_desc *)	\ @@ -128,7 +130,10 @@ enum i40e_state_t {  	__I40E_PF_RESET_REQUESTED,  	__I40E_CORE_RESET_REQUESTED,  	__I40E_GLOBAL_RESET_REQUESTED, +	__I40E_EMP_RESET_REQUESTED,  	__I40E_FILTER_OVERFLOW_PROMISC, +	__I40E_SUSPENDED, +	__I40E_BAD_EEPROM,  };  enum i40e_interrupt_policy { @@ -145,8 +150,33 @@ struct i40e_lump_tracking {  };  #define I40E_DEFAULT_ATR_SAMPLE_RATE	20 -#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512 -struct i40e_fdir_data { +#define I40E_FDIR_MAX_RAW_PACKET_SIZE	512 +#define I40E_FDIR_BUFFER_FULL_MARGIN	10 +#define I40E_FDIR_BUFFER_HEAD_ROOM	200 + +enum i40e_fd_stat_idx { +	I40E_FD_STAT_ATR, +	I40E_FD_STAT_SB, +	I40E_FD_STAT_PF_COUNT +}; +#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT) +#define I40E_FD_ATR_STAT_IDX(pf_id) \ +			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR) +#define I40E_FD_SB_STAT_IDX(pf_id)  \ +			(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB) + +struct i40e_fdir_filter { +	struct hlist_node fdir_node; +	/* filter ipnut set */ +	u8 flow_type; +	u8 ip4_proto; +	/* TX packet view of src and dst */ +	__be32 dst_ip[4]; +	__be32 src_ip[4]; +	__be16 src_port; +	__be16 dst_port; +	__be32 sctp_v_tag; +	/* filter control */  	u16 q_index;  	u8  flex_off;  	u8  pctype; @@ -155,9 +185,10 @@ struct i40e_fdir_data {  	u8  fd_status;  	u16 cnt_index;  	u32 fd_id; -	u8  *raw_packet;  }; +#define I40E_ETH_P_LLDP			0x88cc +  #define I40E_DCB_PRIO_TYPE_STRICT	0  #define I40E_DCB_PRIO_TYPE_ETS		1  #define I40E_DCB_STRICT_PRIO_CREDITS	127 @@ -183,23 +214,34 @@ struct i40e_pf {  	unsigned long state;  	unsigned long link_check_timeout;  	struct msix_entry *msix_entries; -	u16 num_msix_entries;  	bool fc_autoneg_status;  	u16 eeprom_version; -	u16 num_vmdq_vsis;         /* num vmdq pools this pf has set up */ +	u16 num_vmdq_vsis;         /* num vmdq vsis this pf has set up */  	u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */  	u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */  
	u16 num_req_vfs;           /* num vfs requested for this vf */  	u16 num_vf_qps;            /* num queue pairs per vf */ -	u16 num_tc_qps;            /* num queue pairs per TC */  	u16 num_lan_qps;           /* num lan queues this pf has set up */  	u16 num_lan_msix;          /* num queue vectors for the base pf vsi */ +	int queues_left;           /* queues left unclaimed */  	u16 rss_size;              /* num queues in the RSS array */  	u16 rss_size_max;          /* HW defined max RSS queues */  	u16 fdir_pf_filter_count;  /* num of guaranteed filters for this PF */ +	u16 num_alloc_vsi;         /* num VSIs this driver supports */  	u8 atr_sample_rate; +	bool wol_en; + +	struct hlist_head fdir_filter_list; +	u16 fdir_pf_active_filters; +	u16 fd_sb_cnt_idx; +	u16 fd_atr_cnt_idx; +#ifdef CONFIG_I40E_VXLAN +	__be16  vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; +	u16 pending_vxlan_bitmap; + +#endif  	enum i40e_interrupt_policy int_policy;  	u16 rx_itr_default;  	u16 tx_itr_default; @@ -217,24 +259,28 @@ struct i40e_pf {  #define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)  #define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)  #define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6) -#define I40E_FLAG_MQ_ENABLED                   (u64)(1 << 7) -#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 8) -#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 9) -#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 10) -#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 13) -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 14) -#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 15) -#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 16) -#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 18) -#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 19) -#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 20) -#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 21) -#define 
I40E_FLAG_FDIR_ENABLED                 (u64)(1 << 22) -#define I40E_FLAG_FDIR_ATR_ENABLED             (u64)(1 << 23) -#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 27) - -	u16 num_tx_queues; -	u16 num_rx_queues; +#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7) +#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8) +#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9) +#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12) +#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13) +#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14) +#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15) +#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17) +#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18) +#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19) +#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20) +#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21) +#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22) +#define I40E_FLAG_PTP                          (u64)(1 << 25) +#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26) +#ifdef CONFIG_I40E_VXLAN +#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27) +#endif +#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29) + +	/* tracks features that get auto disabled by errors */ +	u64 auto_disable_flags;  	bool stat_offsets_loaded;  	struct i40e_hw_port_stats stats; @@ -248,6 +294,7 @@ struct i40e_pf {  	u16 globr_count; /* Global reset count */  	u16 empr_count; /* EMP reset count */  	u16 pfr_count; /* PF reset count */ +	u16 sw_int_count; /* SW interrupt count */  	struct mutex switch_mutex;  	u16 lan_vsi;       /* our default LAN VSI */ @@ -265,12 +312,13 @@ struct i40e_pf {  	u16 pf_seid;  	u16 main_vsi_seid;  	u16 mac_seid; -	struct i40e_aqc_get_switch_config_data *sw_config;  	struct kobject *switch_kobj;  #ifdef CONFIG_DEBUG_FS  	struct dentry *i40e_dbg_pf;  #endif /* 
CONFIG_DEBUG_FS */ +	u16 instance; /* A unique number per i40e_pf instance in the system */ +  	/* sr-iov config info */  	struct i40e_vf *vf;  	int num_alloc_vfs;	/* actual number of VFs allocated */ @@ -288,6 +336,18 @@ struct i40e_pf {  	u32	fcoe_hmc_filt_num;  	u32	fcoe_hmc_cntx_num;  	struct i40e_filter_control_settings filter_settings; + +	struct ptp_clock *ptp_clock; +	struct ptp_clock_info ptp_caps; +	struct sk_buff *ptp_tx_skb; +	struct hwtstamp_config tstamp_config; +	unsigned long last_rx_ptp_check; +	spinlock_t tmreg_lock; /* Used to protect the device time registers. */ +	u64 ptp_base_adj; +	u32 tx_hwtstamp_timeouts; +	u32 rx_hwtstamp_cleared; +	bool ptp_tx; +	bool ptp_rx;  };  struct i40e_mac_filter { @@ -347,9 +407,9 @@ struct i40e_vsi {  	u32 rx_buf_failed;  	u32 rx_page_failed; -	/* These are arrays of rings, allocated at run-time */ -	struct i40e_ring *rx_rings; -	struct i40e_ring *tx_rings; +	/* These are containers of ring pointers, allocated at run-time */ +	struct i40e_ring **rx_rings; +	struct i40e_ring **tx_rings;  	u16 work_limit;  	/* high bit set means dynamic, use accessor routines to read/write. 
@@ -366,9 +426,10 @@ struct i40e_vsi {  	u8  dtype;  	/* List of q_vectors allocated to this VSI */ -	struct i40e_q_vector *q_vectors; +	struct i40e_q_vector **q_vectors;  	int num_q_vectors;  	int base_vector; +	bool irqs_ready;  	u16 seid;            /* HW index of this VSI (absolute index) */  	u16 id;              /* VSI number */ @@ -422,8 +483,9 @@ struct i40e_q_vector {  	u8 num_ringpairs;	/* total number of ring pairs in vector */ -	char name[IFNAMSIZ + 9];  	cpumask_t affinity_mask; +	struct rcu_head rcu;	/* to avoid race with update stats on free */ +	char name[IFNAMSIZ + 9];  } ____cacheline_internodealigned_in_smp;  /* lan device */ @@ -441,15 +503,13 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)  	static char buf[32];  	snprintf(buf, sizeof(buf), -		 "f%d.%d a%d.%d n%02d.%02d.%02d e%08x", +		 "f%d.%d a%d.%d n%02x.%02x e%08x",  		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,  		 hw->aq.api_maj_ver, hw->aq.api_min_ver, -		 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) -						>> I40E_NVM_VERSION_HI_SHIFT, -		 (hw->nvm.version & I40E_NVM_VERSION_MID_MASK) -						>> I40E_NVM_VERSION_MID_SHIFT, -		 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) -						>> I40E_NVM_VERSION_LO_SHIFT, +		 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> +			I40E_NVM_VERSION_HI_SHIFT, +		 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> +			I40E_NVM_VERSION_LO_SHIFT,  		 hw->nvm.eetrack);  	return buf; @@ -490,11 +550,21 @@ static inline bool i40e_rx_is_programming_status(u64 qw)  		(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);  } +/** + * i40e_get_fd_cnt_all - get the total FD filter space available + * @pf: pointer to the pf struct + **/ +static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) +{ +	return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; +} +  /* needed by i40e_ethtool.c */  int i40e_up(struct i40e_vsi *vsi);  void i40e_down(struct i40e_vsi *vsi);  extern const char i40e_driver_name[];  extern const char i40e_driver_version_str[]; +void 
i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);  void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);  void i40e_update_stats(struct i40e_vsi *vsi);  void i40e_update_eth_stats(struct i40e_vsi *vsi); @@ -502,16 +572,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);  int i40e_fetch_switch_configuration(struct i40e_pf *pf,  				    bool printconfig); -/* needed by i40e_main.c */ -void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data, -			  struct i40e_ring *tx_ring); -void i40e_add_remove_filter(struct i40e_fdir_data fdir_data, -			    struct i40e_ring *tx_ring); -void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data, -			     struct i40e_ring *tx_ring); -int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,  			     struct i40e_pf *pf, bool add); - +int i40e_add_del_fdir(struct i40e_vsi *vsi, +		      struct i40e_fdir_filter *input, bool add); +void i40e_fdir_check_and_reenable(struct i40e_pf *pf); +int i40e_get_current_fd_count(struct i40e_pf *pf); +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);  void i40e_set_ethtool_ops(struct net_device *netdev);  struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,  					u8 *macaddr, s16 vlan, @@ -524,10 +591,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,  int i40e_vsi_release(struct i40e_vsi *vsi);  struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,  				 struct i40e_vsi *start_vsi); +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);  struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,  				u16 downlink_seid, u8 enabled_tc);  void i40e_veb_release(struct i40e_veb *veb); +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc);  i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);  void 
i40e_vsi_remove_pvid(struct i40e_vsi *vsi);  void i40e_vsi_reset_stats(struct i40e_vsi *vsi); @@ -544,7 +614,10 @@ static inline void i40e_dbg_init(void) {}  static inline void i40e_dbg_exit(void) {}  #endif /* CONFIG_DEBUG_FS*/  void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);  int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +int i40e_vsi_open(struct i40e_vsi *vsi);  void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);  int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);  int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); @@ -554,5 +627,21 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);  struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,  				      bool is_vf, bool is_netdev);  void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); - +#ifdef CONFIG_I40E_DCB +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, +			   struct i40e_dcbx_config *new_cfg); +void i40e_dcbnl_set_all(struct i40e_vsi *vsi); +void i40e_dcbnl_setup(struct i40e_vsi *vsi); +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, +			    struct i40e_dcbx_config *old_cfg, +			    struct i40e_dcbx_config *new_cfg); +#endif /* CONFIG_I40E_DCB */ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi); +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); +void i40e_ptp_set_increment(struct i40e_pf *pf); +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); +void i40e_ptp_init(struct i40e_pf *pf); +void i40e_ptp_stop(struct i40e_pf *pf);  #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index cfef7fc32cd..7a027499fc5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -31,6 +30,18 @@  #include "i40e_adminq.h"  #include "i40e_prototype.h" +static void i40e_resume_aq(struct i40e_hw *hw); + +/** + * i40e_is_nvm_update_op - return true if this is an NVM update operation + * @desc: API request descriptor + **/ +static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) +{ +	return (desc->opcode == i40e_aqc_opc_nvm_erase) || +	       (desc->opcode == i40e_aqc_opc_nvm_update); +} +  /**   *  i40e_adminq_init_regs - Initialize AdminQ registers   *  @hw: pointer to the hardware structure @@ -43,13 +54,17 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)  	if (hw->mac.type == I40E_MAC_VF) {  		hw->aq.asq.tail = I40E_VF_ATQT1;  		hw->aq.asq.head = I40E_VF_ATQH1; +		hw->aq.asq.len  = I40E_VF_ATQLEN1;  		hw->aq.arq.tail = I40E_VF_ARQT1;  		hw->aq.arq.head = I40E_VF_ARQH1; +		hw->aq.arq.len  = I40E_VF_ARQLEN1;  	} else {  		hw->aq.asq.tail = I40E_PF_ATQT;  		hw->aq.asq.head = I40E_PF_ATQH; +		hw->aq.asq.len  = I40E_PF_ATQLEN;  		hw->aq.arq.tail = I40E_PF_ARQT;  		hw->aq.arq.head = I40E_PF_ARQH; +		hw->aq.arq.len  = I40E_PF_ARQLEN;  	}  } @@ -60,9 +75,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)  static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)  {  	i40e_status ret_code; -	struct i40e_virt_mem mem; -	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem, +	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,  					 i40e_mem_atq_ring,  					 (hw->aq.num_asq_entries *  					 sizeof(struct i40e_aq_desc)), @@ -70,21 +84,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)  	if (ret_code)  		return ret_code; -	hw->aq.asq.desc = hw->aq.asq_mem.va; -	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa; - -	ret_code = i40e_allocate_virt_mem(hw, &mem, +	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,  					  (hw->aq.num_asq_entries *  					  sizeof(struct i40e_asq_cmd_details)));  	if (ret_code) { -		i40e_free_dma_mem(hw, &hw->aq.asq_mem); -		hw->aq.asq_mem.va = NULL; -		
hw->aq.asq_mem.pa = 0; +		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);  		return ret_code;  	} -	hw->aq.asq.details = mem.va; -  	return ret_code;  } @@ -96,16 +103,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)  {  	i40e_status ret_code; -	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem, +	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,  					 i40e_mem_arq_ring,  					 (hw->aq.num_arq_entries *  					 sizeof(struct i40e_aq_desc)),  					 I40E_ADMINQ_DESC_ALIGNMENT); -	if (ret_code) -		return ret_code; - -	hw->aq.arq.desc = hw->aq.arq_mem.va; -	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;  	return ret_code;  } @@ -119,14 +121,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)   **/  static void i40e_free_adminq_asq(struct i40e_hw *hw)  { -	struct i40e_virt_mem mem; - -	i40e_free_dma_mem(hw, &hw->aq.asq_mem); -	hw->aq.asq_mem.va = NULL; -	hw->aq.asq_mem.pa = 0; -	mem.va = hw->aq.asq.details; -	i40e_free_virt_mem(hw, &mem); -	hw->aq.asq.details = NULL; +	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);  }  /** @@ -138,20 +133,17 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)   **/  static void i40e_free_adminq_arq(struct i40e_hw *hw)  { -	i40e_free_dma_mem(hw, &hw->aq.arq_mem); -	hw->aq.arq_mem.va = NULL; -	hw->aq.arq_mem.pa = 0; +	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);  }  /**   *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   **/  static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)  {  	i40e_status ret_code;  	struct i40e_aq_desc *desc; -	struct i40e_virt_mem mem;  	struct i40e_dma_mem *bi;  	int i; @@ -160,11 +152,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)  	 */  	/* buffer_info structures do not need alignment */ -	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries * -					  sizeof(struct i40e_dma_mem))); +	ret_code = 
i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head, +		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));  	if (ret_code)  		goto alloc_arq_bufs; -	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va; +	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;  	/* allocate the mapped buffers */  	for (i = 0; i < hw->aq.num_arq_entries; i++) { @@ -206,29 +198,27 @@ unwind_alloc_arq_bufs:  	i--;  	for (; i >= 0; i--)  		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); -	mem.va = hw->aq.arq.r.arq_bi; -	i40e_free_virt_mem(hw, &mem); +	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);  	return ret_code;  }  /**   *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   **/  static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)  {  	i40e_status ret_code; -	struct i40e_virt_mem mem;  	struct i40e_dma_mem *bi;  	int i;  	/* No mapped memory needed yet, just the buffer info structures */ -	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries * -					  sizeof(struct i40e_dma_mem))); +	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head, +		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));  	if (ret_code)  		goto alloc_asq_bufs; -	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va; +	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;  	/* allocate the mapped buffers */  	for (i = 0; i < hw->aq.num_asq_entries; i++) { @@ -248,35 +238,36 @@ unwind_alloc_asq_bufs:  	i--;  	for (; i >= 0; i--)  		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); -	mem.va = hw->aq.asq.r.asq_bi; -	i40e_free_virt_mem(hw, &mem); +	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);  	return ret_code;  }  /**   *  i40e_free_arq_bufs - Free receive queue buffer info elements - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   **/  static void i40e_free_arq_bufs(struct i40e_hw *hw)  { -	struct 
i40e_virt_mem mem;  	int i; +	/* free descriptors */  	for (i = 0; i < hw->aq.num_arq_entries; i++)  		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]); -	mem.va = hw->aq.arq.r.arq_bi; -	i40e_free_virt_mem(hw, &mem); +	/* free the descriptor memory */ +	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf); + +	/* free the dma header */ +	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);  }  /**   *  i40e_free_asq_bufs - Free send queue buffer info elements - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   **/  static void i40e_free_asq_bufs(struct i40e_hw *hw)  { -	struct i40e_virt_mem mem;  	int i;  	/* only unmap if the address is non-NULL */ @@ -284,63 +275,98 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)  		if (hw->aq.asq.r.asq_bi[i].pa)  			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]); -	/* now free the buffer info list */ -	mem.va = hw->aq.asq.r.asq_bi; -	i40e_free_virt_mem(hw, &mem); +	/* free the buffer info list */ +	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf); + +	/* free the descriptor memory */ +	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf); + +	/* free the dma header */ +	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);  }  /**   *  i40e_config_asq_regs - configure ASQ registers - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   *  Configure base address and length registers for the transmit queue   **/ -static void i40e_config_asq_regs(struct i40e_hw *hw) +static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)  { +	i40e_status ret_code = 0; +	u32 reg = 0; +  	if (hw->mac.type == I40E_MAC_VF) {  		/* configure the transmit queue */ -		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr)); -		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr)); +		wr32(hw, I40E_VF_ATQBAH1, +		    upper_32_bits(hw->aq.asq.desc_buf.pa)); +		wr32(hw, I40E_VF_ATQBAL1, +		    lower_32_bits(hw->aq.asq.desc_buf.pa));  		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |  		
			  I40E_VF_ATQLEN1_ATQENABLE_MASK)); +		reg = rd32(hw, I40E_VF_ATQBAL1);  	} else {  		/* configure the transmit queue */ -		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr)); -		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr)); +		wr32(hw, I40E_PF_ATQBAH, +		    upper_32_bits(hw->aq.asq.desc_buf.pa)); +		wr32(hw, I40E_PF_ATQBAL, +		    lower_32_bits(hw->aq.asq.desc_buf.pa));  		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |  					  I40E_PF_ATQLEN_ATQENABLE_MASK)); +		reg = rd32(hw, I40E_PF_ATQBAL);  	} + +	/* Check one register to verify that config was applied */ +	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa)) +		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + +	return ret_code;  }  /**   *  i40e_config_arq_regs - ARQ register configuration - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   * Configure base address and length registers for the receive (event queue)   **/ -static void i40e_config_arq_regs(struct i40e_hw *hw) +static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)  { +	i40e_status ret_code = 0; +	u32 reg = 0; +  	if (hw->mac.type == I40E_MAC_VF) {  		/* configure the receive queue */ -		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr)); -		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr)); +		wr32(hw, I40E_VF_ARQBAH1, +		    upper_32_bits(hw->aq.arq.desc_buf.pa)); +		wr32(hw, I40E_VF_ARQBAL1, +		    lower_32_bits(hw->aq.arq.desc_buf.pa));  		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |  					  I40E_VF_ARQLEN1_ARQENABLE_MASK)); +		reg = rd32(hw, I40E_VF_ARQBAL1);  	} else {  		/* configure the receive queue */ -		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr)); -		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr)); +		wr32(hw, I40E_PF_ARQBAH, +		    upper_32_bits(hw->aq.arq.desc_buf.pa)); +		wr32(hw, I40E_PF_ARQBAL, +		    lower_32_bits(hw->aq.arq.desc_buf.pa));  		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |  				
	  I40E_PF_ARQLEN_ARQENABLE_MASK)); +		reg = rd32(hw, I40E_PF_ARQBAL);  	}  	/* Update tail in the HW to post pre-allocated buffers */  	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); + +	/* Check one register to verify that config was applied */ +	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa)) +		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; + +	return ret_code;  }  /**   *  i40e_init_asq - main initialization routine for ASQ - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   *  This is the main initialization routine for the Admin Send Queue   *  Prior to calling this function, drivers *MUST* set the following fields @@ -383,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)  		goto init_adminq_free_rings;  	/* initialize base registers */ -	i40e_config_asq_regs(hw); +	ret_code = i40e_config_asq_regs(hw); +	if (ret_code) +		goto init_adminq_free_rings;  	/* success! */  	goto init_adminq_exit; @@ -397,7 +425,7 @@ init_adminq_exit:  /**   *  i40e_init_arq - initialize ARQ - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   *  The main initialization routine for the Admin Receive (Event) Queue.   *  Prior to calling this function, drivers *MUST* set the following fields @@ -440,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)  		goto init_adminq_free_rings;  	/* initialize base registers */ -	i40e_config_arq_regs(hw); +	ret_code = i40e_config_arq_regs(hw); +	if (ret_code) +		goto init_adminq_free_rings;  	/* success! 
*/  	goto init_adminq_exit; @@ -454,7 +484,7 @@ init_adminq_exit:  /**   *  i40e_shutdown_asq - shutdown the ASQ - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   *  The main shutdown routine for the Admin Send Queue   **/ @@ -466,10 +496,9 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)  		return I40E_ERR_NOT_READY;  	/* Stop firmware AdminQ processing */ -	if (hw->mac.type == I40E_MAC_VF) -		wr32(hw, I40E_VF_ATQLEN1, 0); -	else -		wr32(hw, I40E_PF_ATQLEN, 0); +	wr32(hw, hw->aq.asq.head, 0); +	wr32(hw, hw->aq.asq.tail, 0); +	wr32(hw, hw->aq.asq.len, 0);  	/* make sure lock is available */  	mutex_lock(&hw->aq.asq_mutex); @@ -478,8 +507,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)  	/* free ring buffers */  	i40e_free_asq_bufs(hw); -	/* free the ring descriptors */ -	i40e_free_adminq_asq(hw);  	mutex_unlock(&hw->aq.asq_mutex); @@ -488,7 +515,7 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)  /**   *  i40e_shutdown_arq - shutdown ARQ - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   *  The main shutdown routine for the Admin Receive Queue   **/ @@ -500,10 +527,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)  		return I40E_ERR_NOT_READY;  	/* Stop firmware AdminQ processing */ -	if (hw->mac.type == I40E_MAC_VF) -		wr32(hw, I40E_VF_ARQLEN1, 0); -	else -		wr32(hw, I40E_PF_ARQLEN, 0); +	wr32(hw, hw->aq.arq.head, 0); +	wr32(hw, hw->aq.arq.tail, 0); +	wr32(hw, hw->aq.arq.len, 0);  	/* make sure lock is available */  	mutex_lock(&hw->aq.arq_mutex); @@ -512,8 +538,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)  	/* free ring buffers */  	i40e_free_arq_bufs(hw); -	/* free the ring descriptors */ -	i40e_free_adminq_arq(hw);  	mutex_unlock(&hw->aq.arq_mutex); @@ -522,7 +546,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)  /**   *  i40e_init_adminq - main initialization routine for Admin Queue - *  
@hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   *   *  Prior to calling this function, drivers *MUST* set the following fields   *  in the hw->aq structure: @@ -533,8 +557,9 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)   **/  i40e_status i40e_init_adminq(struct i40e_hw *hw)  { -	u16 eetrack_lo, eetrack_hi;  	i40e_status ret_code; +	u16 eetrack_lo, eetrack_hi; +	int retry = 0;  	/* verify input for valid configuration */  	if ((hw->aq.num_arq_entries == 0) || @@ -562,23 +587,41 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)  	if (ret_code)  		goto init_adminq_free_asq; -	ret_code = i40e_aq_get_firmware_version(hw, -				     &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, -				     &hw->aq.api_maj_ver, &hw->aq.api_min_ver, -				     NULL); -	if (ret_code) +	/* There are some cases where the firmware may not be quite ready +	 * for AdminQ operations, so we retry the AdminQ setup a few times +	 * if we see timeouts in this first AQ call. +	 */ +	do { +		ret_code = i40e_aq_get_firmware_version(hw, +							&hw->aq.fw_maj_ver, +							&hw->aq.fw_min_ver, +							&hw->aq.api_maj_ver, +							&hw->aq.api_min_ver, +							NULL); +		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT) +			break; +		retry++; +		msleep(100); +		i40e_resume_aq(hw); +	} while (retry < 10); +	if (ret_code != I40E_SUCCESS)  		goto init_adminq_free_arq; -	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR || -	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) { -		ret_code = I40E_ERR_FIRMWARE_API_VERSION; -		goto init_adminq_free_arq; -	} +	/* get the NVM version info */  	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);  	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);  	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);  	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; +	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { +		ret_code = I40E_ERR_FIRMWARE_API_VERSION; +		goto init_adminq_free_arq; +	} + 
+	/* pre-emptive resource lock release */ +	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); +	hw->aq.nvm_busy = false; +  	ret_code = i40e_aq_set_hmc_resource_profile(hw,  						    I40E_HMC_PROFILE_DEFAULT,  						    0, @@ -600,12 +643,15 @@ init_adminq_exit:  /**   *  i40e_shutdown_adminq - shutdown routine for the Admin Queue - *  @hw:     pointer to the hardware structure + *  @hw: pointer to the hardware structure   **/  i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)  {  	i40e_status ret_code = 0; +	if (i40e_check_asq_alive(hw)) +		i40e_aq_queue_shutdown(hw, true); +  	i40e_shutdown_asq(hw);  	i40e_shutdown_arq(hw); @@ -616,7 +662,7 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)  /**   *  i40e_clean_asq - cleans Admin send queue - *  @asq: pointer to the adminq send ring + *  @hw: pointer to the hardware structure   *   *  returns the number of free desc   **/ @@ -637,9 +683,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)  			desc_cb = *desc;  			cb_func(hw, &desc_cb);  		} -		memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); -		memset((void *)details, 0, -		       sizeof(struct i40e_asq_cmd_details)); +		memset(desc, 0, sizeof(*desc)); +		memset(details, 0, sizeof(*details));  		ntc++;  		if (ntc == asq->count)  			ntc = 0; @@ -659,12 +704,12 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)   *  Returns true if the firmware has processed all descriptors on the   *  admin send queue. Returns false if there are still requests pending.   
**/ -bool i40e_asq_done(struct i40e_hw *hw) +static bool i40e_asq_done(struct i40e_hw *hw)  {  	/* AQ designers suggest use of head for better  	 * timing reliability than DD bit  	 */ -	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use); +	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;  } @@ -674,7 +719,7 @@ bool i40e_asq_done(struct i40e_hw *hw)   *  @desc: prefilled descriptor describing the command (non DMA mem)   *  @buff: buffer to use for indirect commands   *  @buff_size: size of buffer for indirect commands - *  @opaque: pointer to info to be used in async cleanup + *  @cmd_details: pointer to command details structure   *   *  This is the main send command driver routine for the Admin Queue send   *  queue.  It runs the queue, cleans the queue, etc @@ -699,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,  		goto asq_send_command_exit;  	} +	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) { +		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n"); +		status = I40E_ERR_NVM; +		goto asq_send_command_exit; +	} +  	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);  	if (cmd_details) {  		*details = *cmd_details; @@ -826,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,  		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;  	} +	if (i40e_is_nvm_update_op(desc)) +		hw->aq.nvm_busy = true; +  	/* update the error if time out occurred */  	if ((!cmd_completed) &&  	    (!details->async && !details->postpone)) { @@ -854,7 +908,7 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,  	/* zero out the desc */  	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));  	desc->opcode = cpu_to_le16(opcode); -	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI); +	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);  }  /** @@ -912,7 +966,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,  			   "AQRX: Event received with error 0x%X.\n",  			   
hw->aq.arq_last_status);  	} else { -		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc)); +		e->desc = *desc;  		datalen = le16_to_cpu(desc->datalen);  		e->msg_size = min(datalen, e->msg_size);  		if (e->msg_buf != NULL && (e->msg_size != 0)) @@ -920,11 +974,19 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,  			       e->msg_size);  	} +	if (i40e_is_nvm_update_op(&e->desc)) +		hw->aq.nvm_busy = false; +  	/* Restore the original datalen and buffer address in the desc,  	 * FW updates datalen to indicate the event message  	 * size  	 */  	bi = &hw->aq.arq.r.arq_bi[ntc]; +	memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); + +	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF); +	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) +		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);  	desc->datalen = cpu_to_le16((u16)bi->size);  	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));  	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa)); @@ -947,36 +1009,16 @@ clean_arq_element_out:  	return ret_code;  } -void i40e_resume_aq(struct i40e_hw *hw) +static void i40e_resume_aq(struct i40e_hw *hw)  { -	u32 reg = 0; -  	/* Registers are reset after PF reset */  	hw->aq.asq.next_to_use = 0;  	hw->aq.asq.next_to_clean = 0;  	i40e_config_asq_regs(hw); -	reg = hw->aq.num_asq_entries; - -	if (hw->mac.type == I40E_MAC_VF) { -		reg |= I40E_VF_ATQLEN_ATQENABLE_MASK; -		wr32(hw, I40E_VF_ATQLEN1, reg); -	} else { -		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK; -		wr32(hw, I40E_PF_ATQLEN, reg); -	}  	hw->aq.arq.next_to_use = 0;  	hw->aq.arq.next_to_clean = 0;  	i40e_config_arq_regs(hw); -	reg = hw->aq.num_arq_entries; - -	if (hw->mac.type == I40E_MAC_VF) { -		reg |= I40E_VF_ATQLEN_ATQENABLE_MASK; -		wr32(hw, I40E_VF_ARQLEN1, reg); -	} else { -		reg |= I40E_PF_ATQLEN_ATQENABLE_MASK; -		wr32(hw, I40E_PF_ARQLEN, reg); -	}  } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 22e5ed683e4..b1552fbc48a 
100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -32,20 +31,20 @@  #include "i40e_adminq_cmd.h"  #define I40E_ADMINQ_DESC(R, i)   \ -	(&(((struct i40e_aq_desc *)((R).desc))[i])) +	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))  #define I40E_ADMINQ_DESC_ALIGNMENT 4096  struct i40e_adminq_ring { -	void *desc;		/* Descriptor ring memory */ -	void *details;		/* ASQ details */ +	struct i40e_virt_mem dma_head;	/* space for dma structures */ +	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */ +	struct i40e_virt_mem cmd_buf;	/* command buffer memory */  	union {  		struct i40e_dma_mem *asq_bi;  		struct i40e_dma_mem *arq_bi;  	} r; -	u64 dma_addr;		/* Physical address of the ring */  	u16 count;		/* Number of descriptors */  	u16 rx_buf_len;		/* Admin Receive Queue buffer length */ @@ -56,6 +55,7 @@ struct i40e_adminq_ring {  	/* used for queue tracking */  	u32 head;  	u32 tail; +	u32 len;  };  /* ASQ transaction details */ @@ -69,7 +69,7 @@ struct i40e_asq_cmd_details {  };  #define I40E_ADMINQ_DETAILS(R, i)   \ -	(&(((struct i40e_asq_cmd_details *)((R).details))[i])) +	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))  /* ARQ event information */  struct i40e_arq_event_info { @@ -90,13 +90,11 @@ struct i40e_adminq_info {  	u16 fw_min_ver;                 /* firmware minor version */  	u16 api_maj_ver;                /* api major version */  	u16 api_min_ver;                /* api minor version */ +	bool nvm_busy;  	struct mutex asq_mutex; /* Send queue lock */  	struct mutex arq_mutex; /* Receive queue lock */ -	struct i40e_dma_mem asq_mem;    /* send queue dynamic memory */ -	struct i40e_dma_mem arq_mem;    /* receive queue dynamic memory */ -  	/* last status values on send and receive queues */  	enum i40e_admin_queue_err asq_last_status;  	enum i40e_admin_queue_err arq_last_status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index e61ebdd5a5f..15f289f2917 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ 
b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -35,7 +34,7 @@   */  #define I40E_FW_API_VERSION_MAJOR  0x0001 -#define I40E_FW_API_VERSION_MINOR  0x0000 +#define I40E_FW_API_VERSION_MINOR  0x0002  struct i40e_aq_desc {  	__le16 flags; @@ -124,6 +123,7 @@ enum i40e_admin_queue_opc {  	i40e_aqc_opc_get_version      = 0x0001,  	i40e_aqc_opc_driver_version   = 0x0002,  	i40e_aqc_opc_queue_shutdown   = 0x0003, +	i40e_aqc_opc_set_pf_context   = 0x0004,  	/* resource ownership */  	i40e_aqc_opc_request_resource = 0x0008, @@ -137,10 +137,13 @@ enum i40e_admin_queue_opc {  	i40e_aqc_opc_set_ns_proxy_entry     = 0x0105,  	/* LAA */ -	i40e_aqc_opc_mng_laa                = 0x0106, +	i40e_aqc_opc_mng_laa                = 0x0106,   /* AQ obsolete */  	i40e_aqc_opc_mac_address_read       = 0x0107,  	i40e_aqc_opc_mac_address_write      = 0x0108, +	/* PXE */ +	i40e_aqc_opc_clear_pxe_mode         = 0x0110, +  	/* internal switch commands */  	i40e_aqc_opc_get_switch_config         = 0x0200,  	i40e_aqc_opc_add_statistics            = 0x0201, @@ -180,9 +183,6 @@ enum i40e_admin_queue_opc {  	i40e_aqc_opc_add_mirror_rule    = 0x0260,  	i40e_aqc_opc_delete_mirror_rule = 0x0261, -	i40e_aqc_opc_set_storm_control_config = 0x0280, -	i40e_aqc_opc_get_storm_control_config = 0x0281, -  	/* DCB commands */  	i40e_aqc_opc_dcb_ignore_pfc = 0x0301,  	i40e_aqc_opc_dcb_updated    = 0x0302, @@ -205,6 +205,7 @@ enum i40e_admin_queue_opc {  	i40e_aqc_opc_query_switching_comp_bw_config        = 0x041A,  	i40e_aqc_opc_suspend_port_tx                       = 0x041B,  	i40e_aqc_opc_resume_port_tx                        = 0x041C, +	i40e_aqc_opc_configure_partition_bw                = 0x041D,  	/* hmc */  	i40e_aqc_opc_query_hmc_resource_profile = 0x0500, @@ -222,13 +223,15 @@ enum i40e_admin_queue_opc {  	i40e_aqc_opc_get_partner_advt    = 0x0616,  	i40e_aqc_opc_set_lb_modes        = 0x0618,  	i40e_aqc_opc_get_phy_wol_caps    = 0x0621, -	i40e_aqc_opc_set_phy_reset       = 0x0622, +	i40e_aqc_opc_set_phy_debug	 = 0x0622,  	
i40e_aqc_opc_upload_ext_phy_fm   = 0x0625,  	/* NVM commands */ -	i40e_aqc_opc_nvm_read   = 0x0701, -	i40e_aqc_opc_nvm_erase  = 0x0702, -	i40e_aqc_opc_nvm_update = 0x0703, +	i40e_aqc_opc_nvm_read         = 0x0701, +	i40e_aqc_opc_nvm_erase        = 0x0702, +	i40e_aqc_opc_nvm_update       = 0x0703, +	i40e_aqc_opc_nvm_config_read  = 0x0704, +	i40e_aqc_opc_nvm_config_write = 0x0705,  	/* virtualization commands */  	i40e_aqc_opc_send_msg_to_pf   = 0x0801, @@ -270,8 +273,6 @@ enum i40e_admin_queue_opc {  	i40e_aqc_opc_debug_set_mode         = 0xFF01,  	i40e_aqc_opc_debug_read_reg         = 0xFF03,  	i40e_aqc_opc_debug_write_reg        = 0xFF04, -	i40e_aqc_opc_debug_read_reg_sg      = 0xFF05, -	i40e_aqc_opc_debug_write_reg_sg     = 0xFF06,  	i40e_aqc_opc_debug_modify_reg       = 0xFF07,  	i40e_aqc_opc_debug_dump_internals   = 0xFF08,  	i40e_aqc_opc_debug_modify_internals = 0xFF09, @@ -317,13 +318,15 @@ struct i40e_aqc_get_version {  I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); -/* Send driver version (direct 0x0002) */ +/* Send driver version (indirect 0x0002) */  struct i40e_aqc_driver_version {  	u8     driver_major_ver;  	u8     driver_minor_ver;  	u8     driver_build_ver;  	u8     driver_subbuild_ver; -	u8     reserved[12]; +	u8     reserved[4]; +	__le32 address_high; +	__le32 address_low;  };  I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); @@ -337,6 +340,14 @@ struct i40e_aqc_queue_shutdown {  I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { +	u8	pf_id; +	u8	reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); +  /* Request resource ownership (direct 0x0008)   * Release resource ownership (direct 0x0009)   */ @@ -479,7 +490,7 @@ struct i40e_aqc_mng_laa {  	u8     reserved2[6];  }; -/* Manage MAC Address Read Command (0x0107) */ +/* Manage MAC Address Read Command (indirect 0x0107) */  struct i40e_aqc_mac_address_read {  	__le16	command_flags;  #define I40E_AQC_LAN_ADDR_VALID   
0x10 @@ -517,6 +528,16 @@ struct i40e_aqc_mac_address_write {  I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response  (direct 0x0110) */ +struct i40e_aqc_clear_pxe { +	u8	rx_cnt; +	u8	reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); +  /* Switch configuration commands (0x02xx) */  /* Used by many indirect commands that only pass an seid and a buffer in the @@ -639,13 +660,15 @@ struct i40e_aqc_switch_resource_alloc_element_resp {  	u8     reserved2[6];  }; -/* Add VSI (indirect 0x210) +/* Add VSI (indirect 0x0210)   *    this indirect command uses struct i40e_aqc_vsi_properties_data   *    as the indirect buffer (128 bytes)   * - * Update VSI (indirect 0x211) Get VSI (indirect 0x0212) - *    use the generic i40e_aqc_switch_seid descriptor format - *    use the same completion and data structure as Add VSI + * Update VSI (indirect 0x211) + *     uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + *     uses the same completion and data structure as Add VSI   */  struct i40e_aqc_add_get_update_vsi {  	__le16 uplink_seid; @@ -664,7 +687,6 @@ struct i40e_aqc_add_get_update_vsi {  #define I40E_AQ_VSI_TYPE_PF             0x2  #define I40E_AQ_VSI_TYPE_EMP_MNG        0x3  #define I40E_AQ_VSI_FLAG_CASCADED_PV    0x4 -#define I40E_AQ_VSI_FLAG_CLOUD_VSI      0x8  	__le32 addr_high;  	__le32 addr_low;  }; @@ -1026,7 +1048,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {  #define I40E_AQC_SET_VSI_PROMISC_VLAN        0x10  	__le16 seid;  #define I40E_AQC_VSI_PROM_CMD_SEID_MASK      0x3FF -	u8     reserved[10]; +	__le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID          0x8000 +	u8     reserved[8];  };  I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1179,33 +1203,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {  		} v4;  		struct {  			u8 data[16]; -			} v6; -		} ipaddr; +		} v6; +	} ipaddr;  	__le16 flags;  #define 
I40E_AQC_ADD_CLOUD_FILTER_SHIFT                 0  #define I40E_AQC_ADD_CLOUD_FILTER_MASK                  (0x3F << \  					I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */  #define I40E_AQC_ADD_CLOUD_FILTER_OIP                   0x0001 -#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE               0x0002 +/* 0x0002 reserved */  #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN            0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE        0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID     0x0004 +/* 0x0005 reserved */  #define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID           0x0006 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL        0x0007 +/* 0x0007 reserved */  /* 0x0008 reserved */  #define I40E_AQC_ADD_CLOUD_FILTER_OMAC                  0x0009  #define I40E_AQC_ADD_CLOUD_FILTER_IMAC                  0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC      0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP                   0x000C +  #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE               0x0080  #define I40E_AQC_ADD_CLOUD_VNK_SHIFT                    6  #define I40E_AQC_ADD_CLOUD_VNK_MASK                     0x00C0  #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                   0  #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                   0x0100 -	__le32 key_low; -	__le32 key_high; + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT               9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK                0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN               0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC          1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                  3 + +	__le32 tenant_id; +	u8     reserved[4];  	__le16 queue_number;  #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0  #define I40E_AQC_ADD_CLOUD_QUEUE_MASK                   (0x3F << \  					I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) -	u8     reserved[14]; +	u8     reserved2[14];  	/* response section */  	u8     
allocation_result;  #define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS         0x0 @@ -1259,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {  I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); -/* Set Storm Control Configuration (direct 0x0280) - * Get Storm Control Configuration (direct 0x0281) - *    the command and response use the same descriptor structure - */ -struct i40e_aqc_set_get_storm_control_config { -	__le32 broadcast_threshold; -	__le32 multicast_threshold; -	__le32 control_flags; -#define I40E_AQC_STORM_CONTROL_MDIPW            0x01 -#define I40E_AQC_STORM_CONTROL_MDICW            0x02 -#define I40E_AQC_STORM_CONTROL_BDIPW            0x04 -#define I40E_AQC_STORM_CONTROL_BDICW            0x08 -#define I40E_AQC_STORM_CONTROL_BIDU             0x10 -#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT   8 -#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK    (0x3FF << \ -					I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT) -	u8     reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config); -  /* DCB 0x03xx*/  /* PFC Ignore (direct 0x0301) @@ -1397,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);  struct i40e_aqc_configure_switching_comp_ets_data {  	u8     reserved[4];  	u8     tc_valid_bits; -	u8     reserved1; +	u8     seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK     0x1  	u8     tc_strict_priority_flags; -	u8     reserved2[17]; +	u8     reserved1[17];  	u8     tc_bw_share_credits[8]; -	u8     reserved3[96]; +	u8     reserved2[96];  };  /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ @@ -1469,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {   * (direct 0x041B and 0x041C) uses the generic SEID struct   */ +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { +	__le16 pf_valid_bits; +	u8     min_bw[16];      /* guaranteed bandwidth */ +	u8     max_bw[16];      /* bandwidth limit */ +}; +  /* Get and 
set the active HMC resource profile and status.   * (direct 0x0500) and (direct 0x0501)   */ @@ -1509,6 +1535,8 @@ enum i40e_aq_phy_type {  	I40E_PHY_TYPE_XLPPI			= 0x9,  	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,  	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB, +	I40E_PHY_TYPE_10GBASE_AOC		= 0xC, +	I40E_PHY_TYPE_40GBASE_AOC		= 0xD,  	I40E_PHY_TYPE_100BASE_TX		= 0x11,  	I40E_PHY_TYPE_1000BASE_T		= 0x12,  	I40E_PHY_TYPE_10GBASE_T			= 0x13, @@ -1519,7 +1547,10 @@ enum i40e_aq_phy_type {  	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,  	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,  	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A, -	I40E_PHY_TYPE_20GBASE_KR2		= 0x1B, +	I40E_PHY_TYPE_1000BASE_SX		= 0x1B, +	I40E_PHY_TYPE_1000BASE_LX		= 0x1C, +	I40E_PHY_TYPE_1000BASE_T_OPTICAL	= 0x1D, +	I40E_PHY_TYPE_20GBASE_KR2		= 0x1E,  	I40E_PHY_TYPE_MAX  }; @@ -1548,16 +1579,13 @@ struct i40e_aqc_module_desc {  struct i40e_aq_get_phy_abilities_resp {  	__le32 phy_type;       /* bitmap using the above enum for offsets */ -	u8     link_speed;     /* bitmap using the above enum */ +	u8     link_speed;     /* bitmap using the above enum bit patterns */  	u8     abilities;  #define I40E_AQ_PHY_FLAG_PAUSE_TX         0x01  #define I40E_AQ_PHY_FLAG_PAUSE_RX         0x02  #define I40E_AQ_PHY_FLAG_LOW_POWER        0x04 -#define I40E_AQ_PHY_FLAG_AN_SHIFT         3 -#define I40E_AQ_PHY_FLAG_AN_MASK          (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT) -#define I40E_AQ_PHY_FLAG_AN_OFF           0x00 /* link forced on */ -#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01 -#define I40E_AQ_PHY_FLAG_AN_ON            0x02 +#define I40E_AQ_PHY_LINK_ENABLED		  0x08 +#define I40E_AQ_PHY_AN_ENABLED			  0x10  #define I40E_AQ_PHY_FLAG_MODULE_QUAL      0x20  	__le16 eee_capability;  #define I40E_AQ_EEE_100BASE_TX       0x0002 @@ -1582,6 +1610,10 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */  	__le32 phy_type;  	u8     link_speed;  	u8     abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK		0x08 
+#define I40E_AQ_PHY_ENABLE_AN		0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20  	__le16 eee_capability;  	__le32 eeer;  	u8     low_power_ctrl; @@ -1662,6 +1694,7 @@ struct i40e_aqc_get_link_status {  #define I40E_AQ_LINK_TX_ACTIVE       0x00  #define I40E_AQ_LINK_TX_DRAINED      0x01  #define I40E_AQ_LINK_TX_FLUSHED      0x03 +#define I40E_AQ_LINK_FORCED_40G      0x10  	u8     loopback;         /* use defines from i40e_aqc_set_lb_mode */  	__le16 max_frame_size;  	u8     config; @@ -1713,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {  I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); -/* Set PHY Reset command (0x0622) */ -struct i40e_aqc_set_phy_reset { -	u8     reset_flags; -#define I40E_AQ_PHY_RESET_REQUEST  0x02 +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { +	u8     command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \ +					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE	0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10  	u8     reserved[15];  }; -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset); +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);  enum i40e_aq_phy_reg_type {  	I40E_AQC_PHY_REG_INTERNAL         = 0x1, @@ -1745,6 +1785,47 @@ struct i40e_aqc_nvm_update {  I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); +/* NVM Config Read (indirect 0x0704) */ +struct i40e_aqc_nvm_config_read { +	__le16 cmd_flags; +#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1 +#define ANVM_READ_SINGLE_FEATURE		0 +#define ANVM_READ_MULTIPLE_FEATURES		1 +	__le16 element_count; +	__le16 element_id;		/* Feature/field ID */ +	u8     reserved[2]; +	__le32 address_high; +	__le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct 
i40e_aqc_nvm_config_write { +	__le16 cmd_flags; +	__le16 element_count; +	u8     reserved[4]; +	__le32 address_high; +	__le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +struct i40e_aqc_nvm_config_data_feature { +	__le16 feature_id; +	__le16 instance_id; +	__le16 feature_options; +	__le16 feature_selection; +}; + +struct i40e_aqc_nvm_config_data_immediate_field { +#define ANVM_FEATURE_OR_IMMEDIATE_MASK	0x2 +	__le16 field_id; +	__le16 instance_id; +	__le16 field_options; +	__le16 field_value; +}; +  /* Send to PF command (indirect 0x0801) id is only used by PF   * Send to VF command (indirect 0x0802) id is only used by PF   * Send to Peer PF command (indirect 0x0803) @@ -1914,22 +1995,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);  /* Add Udp Tunnel command and completion (direct 0x0B00) */  struct i40e_aqc_add_udp_tunnel {  	__le16 udp_port; -	u8     header_len; /* in DWords, 1 to 15 */ -	u8     protocol_index; -#define I40E_AQC_TUNNEL_TYPE_MAC    0x0 -#define I40E_AQC_TUNNEL_TYPE_UDP    0x1 -	u8     reserved[12]; +	u8     reserved0[3]; +	u8     protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE	0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10 +	u8     reserved1[10];  };  I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); +struct i40e_aqc_add_udp_tunnel_completion { +	__le16 udp_port; +	u8     filter_entry_index; +	u8     multiple_pfs; +#define I40E_AQC_SINGLE_PF	0x0 +#define I40E_AQC_MULTIPLE_PFS	0x1 +	u8     total_filters; +	u8     reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); +  /* remove UDP Tunnel command (0x0B01) */  struct i40e_aqc_remove_udp_tunnel {  	u8     reserved[2];  	u8     index; /* 0 to 15 */ -	u8     pf_filters; -	u8     total_filters; -	u8     reserved2[11]; +	u8     reserved2[13];  };  I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); @@ -1937,28 +2029,32 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);  struct 
i40e_aqc_del_udp_tunnel_completion {  	__le16 udp_port;  	u8     index; /* 0 to 15 */ -	u8     multiple_entries; -	u8     tunnels_used; -	u8     reserved; -	u8     tunnels_free; -	u8     reserved1[9]; +	u8     multiple_pfs; +	u8     total_filters_used; +	u8     reserved1[11];  };  I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);  /* tunnel key structure 0x0B10 */ +  struct i40e_aqc_tunnel_key_structure { -	__le16     key1_off; -	__le16     key1_len; -	__le16     key2_off; -	__le16     key2_len; -	__le16     flags; +	u8	key1_off; +	u8	key2_off; +	u8	key1_len;  /* 0 to 15 */ +	u8	key2_len;  /* 0 to 15 */ +	u8	flags;  #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01  /* response flags */  #define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS    0x01  #define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED   0x02  #define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 -	u8         resreved[6]; +	u8	network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP	0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE			0x3 +	u8	reserved[10];  };  I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); @@ -2052,6 +2148,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);  #define I40E_AQ_CLUSTER_ID_DCB		8  #define I40E_AQ_CLUSTER_ID_EMP_MEM	9  #define I40E_AQ_CLUSTER_ID_PKT_BUF	10 +#define I40E_AQ_CLUSTER_ID_ALTRAM	11  struct i40e_aqc_debug_dump_internals {  	u8     cluster_id; diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index 3b1cc214f9d..926811ad44a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   
*   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 1e4ea134975..6e65f19dd6e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -43,20 +42,18 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)  	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {  		switch (hw->device_id) { -		case I40E_SFP_XL710_DEVICE_ID: -		case I40E_SFP_X710_DEVICE_ID: -		case I40E_QEMU_DEVICE_ID: -		case I40E_KX_A_DEVICE_ID: -		case I40E_KX_B_DEVICE_ID: -		case I40E_KX_C_DEVICE_ID: -		case I40E_KX_D_DEVICE_ID: -		case I40E_QSFP_A_DEVICE_ID: -		case I40E_QSFP_B_DEVICE_ID: -		case I40E_QSFP_C_DEVICE_ID: +		case I40E_DEV_ID_SFP_XL710: +		case I40E_DEV_ID_QEMU: +		case I40E_DEV_ID_KX_A: +		case I40E_DEV_ID_KX_B: +		case I40E_DEV_ID_KX_C: +		case I40E_DEV_ID_QSFP_A: +		case I40E_DEV_ID_QSFP_B: +		case I40E_DEV_ID_QSFP_C:  			hw->mac.type = I40E_MAC_XL710;  			break; -		case I40E_VF_DEVICE_ID: -		case I40E_VF_HV_DEVICE_ID: +		case I40E_DEV_ID_VF: +		case I40E_DEV_ID_VF_HV:  			hw->mac.type = I40E_MAC_VF;  			break;  		default: @@ -75,7 +72,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)  /**   * i40e_debug_aq   * @hw: debug mask related to admin queue - * @cap: pointer to adminq command descriptor + * @mask: debug mask + * @desc: pointer to admin queue descriptor   * @buffer: pointer to command buffer   *   * Dumps debug log about adminq command with descriptor contents. @@ -126,6 +124,413 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,  }  /** + * i40e_check_asq_alive + * @hw: pointer to the hw struct + * + * Returns true if Queue is enabled else false. + **/ +bool i40e_check_asq_alive(struct i40e_hw *hw) +{ +	if (hw->aq.asq.len) +		return !!(rd32(hw, hw->aq.asq.len) & +			  I40E_PF_ATQLEN_ATQENABLE_MASK); +	else +		return false; +} + +/** + * i40e_aq_queue_shutdown + * @hw: pointer to the hw struct + * @unloading: is the driver unloading itself + * + * Tell the Firmware that we're shutting down the AdminQ and whether + * or not the driver is unloading as well. 
+ **/ +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, +					     bool unloading) +{ +	struct i40e_aq_desc desc; +	struct i40e_aqc_queue_shutdown *cmd = +		(struct i40e_aqc_queue_shutdown *)&desc.params.raw; +	i40e_status status; + +	i40e_fill_default_direct_cmd_desc(&desc, +					  i40e_aqc_opc_queue_shutdown); + +	if (unloading) +		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); +	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + +	return status; +} + +/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the + * hardware to a bit-field that can be used by SW to more easily determine the + * packet type. + * + * Macros are used to shorten the table lines and make this table human + * readable. + * + * We store the PTYPE in the top byte of the bit field - this is just so that + * we can check that the table doesn't have a row missing, as the index into + * the table should be the PTYPE. + * + * Typical work flow: + * + * IF NOT i40e_ptype_lookup[ptype].known + * THEN + *      Packet is unknown + * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP + *      Use the rest of the fields to look at the tunnels, inner protocols, etc + * ELSE + *      Use the enum i40e_rx_l2_ptype to decode the packet type + * ENDIF + */ + +/* macro to make the table lines short */ +#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ +	{	PTYPE, \ +		1, \ +		I40E_RX_PTYPE_OUTER_##OUTER_IP, \ +		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ +		I40E_RX_PTYPE_##OUTER_FRAG, \ +		I40E_RX_PTYPE_TUNNEL_##T, \ +		I40E_RX_PTYPE_TUNNEL_END_##TE, \ +		I40E_RX_PTYPE_##TEF, \ +		I40E_RX_PTYPE_INNER_PROT_##I, \ +		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } + +#define I40E_PTT_UNUSED_ENTRY(PTYPE) \ +		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + +/* shorter macros makes the table fit but are terse */ +#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG +#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG +#define 
I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC + +/* Lookup table mapping the HW PTYPE to the bit field for decoding */ +struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { +	/* L2 Packet types */ +	I40E_PTT_UNUSED_ENTRY(0), +	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), +	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2), +	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), +	I40E_PTT_UNUSED_ENTRY(4), +	I40E_PTT_UNUSED_ENTRY(5), +	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), +	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), +	I40E_PTT_UNUSED_ENTRY(8), +	I40E_PTT_UNUSED_ENTRY(9), +	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), +	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), +	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), + +	/* Non Tunneled IPv4 */ +	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(25), +	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4), +	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), +	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + +	/* IPv4 --> IPv4 */ +	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), +	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), +	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4), +	
I40E_PTT_UNUSED_ENTRY(32), +	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + +	/* IPv4 --> IPv6 */ +	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), +	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), +	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(39), +	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + +	/* IPv4 --> GRE/NAT */ +	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + +	/* IPv4 --> GRE/NAT --> IPv4 */ +	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), +	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), +	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(47), +	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + +	/* IPv4 --> GRE/NAT --> IPv6 */ +	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), +	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), +	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(54), +	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + +	/* IPv4 --> GRE/NAT --> MAC */ +	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + +	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */ +	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), +	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), +	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4), +	
I40E_PTT_UNUSED_ENTRY(62), +	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + +	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */ +	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), +	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), +	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(69), +	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + +	/* IPv4 --> GRE/NAT --> MAC/VLAN */ +	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + +	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ +	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), +	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), +	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(77), +	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + +	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ +	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), +	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), +	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(84), +	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + +	/* Non Tunneled IPv6 */ +	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), +	I40E_PTT(89, IP, IPV6, NOF, NONE, 
NONE, NOF, NONE, PAY3), +	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3), +	I40E_PTT_UNUSED_ENTRY(91), +	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4), +	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), +	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + +	/* IPv6 --> IPv4 */ +	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), +	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), +	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(98), +	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + +	/* IPv6 --> IPv6 */ +	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), +	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), +	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(105), +	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + +	/* IPv6 --> GRE/NAT */ +	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + +	/* IPv6 --> GRE/NAT -> IPv4 */ +	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), +	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), +	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(113), +	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + +	/* IPv6 --> GRE/NAT -> IPv6 */ +	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), +	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), +	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(120), +	I40E_PTT(121, IP, IPV6, NOF, 
IP_GRENAT, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + +	/* IPv6 --> GRE/NAT -> MAC */ +	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + +	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */ +	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), +	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), +	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(128), +	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + +	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */ +	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), +	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), +	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(135), +	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + +	/* IPv6 --> GRE/NAT -> MAC/VLAN */ +	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + +	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ +	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), +	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), +	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(143), +	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4), +	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), +	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + +	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ +	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV6, FRG, NONE, PAY3), +	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), +	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4), +	I40E_PTT_UNUSED_ENTRY(150), +	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4), +	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), +	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + +	/* unused entries */ +	I40E_PTT_UNUSED_ENTRY(154), +	I40E_PTT_UNUSED_ENTRY(155), +	I40E_PTT_UNUSED_ENTRY(156), +	I40E_PTT_UNUSED_ENTRY(157), +	I40E_PTT_UNUSED_ENTRY(158), +	I40E_PTT_UNUSED_ENTRY(159), + +	I40E_PTT_UNUSED_ENTRY(160), +	I40E_PTT_UNUSED_ENTRY(161), +	I40E_PTT_UNUSED_ENTRY(162), +	I40E_PTT_UNUSED_ENTRY(163), +	I40E_PTT_UNUSED_ENTRY(164), +	I40E_PTT_UNUSED_ENTRY(165), +	I40E_PTT_UNUSED_ENTRY(166), +	I40E_PTT_UNUSED_ENTRY(167), +	I40E_PTT_UNUSED_ENTRY(168), +	I40E_PTT_UNUSED_ENTRY(169), + +	I40E_PTT_UNUSED_ENTRY(170), +	I40E_PTT_UNUSED_ENTRY(171), +	I40E_PTT_UNUSED_ENTRY(172), +	I40E_PTT_UNUSED_ENTRY(173), +	I40E_PTT_UNUSED_ENTRY(174), +	I40E_PTT_UNUSED_ENTRY(175), +	I40E_PTT_UNUSED_ENTRY(176), +	I40E_PTT_UNUSED_ENTRY(177), +	I40E_PTT_UNUSED_ENTRY(178), +	I40E_PTT_UNUSED_ENTRY(179), + +	I40E_PTT_UNUSED_ENTRY(180), +	I40E_PTT_UNUSED_ENTRY(181), +	I40E_PTT_UNUSED_ENTRY(182), +	I40E_PTT_UNUSED_ENTRY(183), +	I40E_PTT_UNUSED_ENTRY(184), +	I40E_PTT_UNUSED_ENTRY(185), +	I40E_PTT_UNUSED_ENTRY(186), +	I40E_PTT_UNUSED_ENTRY(187), +	I40E_PTT_UNUSED_ENTRY(188), +	I40E_PTT_UNUSED_ENTRY(189), + +	I40E_PTT_UNUSED_ENTRY(190), +	I40E_PTT_UNUSED_ENTRY(191), +	I40E_PTT_UNUSED_ENTRY(192), +	I40E_PTT_UNUSED_ENTRY(193), +	I40E_PTT_UNUSED_ENTRY(194), +	I40E_PTT_UNUSED_ENTRY(195), +	I40E_PTT_UNUSED_ENTRY(196), +	I40E_PTT_UNUSED_ENTRY(197), +	I40E_PTT_UNUSED_ENTRY(198), +	I40E_PTT_UNUSED_ENTRY(199), + +	I40E_PTT_UNUSED_ENTRY(200), +	I40E_PTT_UNUSED_ENTRY(201), +	I40E_PTT_UNUSED_ENTRY(202), +	I40E_PTT_UNUSED_ENTRY(203), +	I40E_PTT_UNUSED_ENTRY(204), +	
I40E_PTT_UNUSED_ENTRY(205), +	I40E_PTT_UNUSED_ENTRY(206), +	I40E_PTT_UNUSED_ENTRY(207), +	I40E_PTT_UNUSED_ENTRY(208), +	I40E_PTT_UNUSED_ENTRY(209), + +	I40E_PTT_UNUSED_ENTRY(210), +	I40E_PTT_UNUSED_ENTRY(211), +	I40E_PTT_UNUSED_ENTRY(212), +	I40E_PTT_UNUSED_ENTRY(213), +	I40E_PTT_UNUSED_ENTRY(214), +	I40E_PTT_UNUSED_ENTRY(215), +	I40E_PTT_UNUSED_ENTRY(216), +	I40E_PTT_UNUSED_ENTRY(217), +	I40E_PTT_UNUSED_ENTRY(218), +	I40E_PTT_UNUSED_ENTRY(219), + +	I40E_PTT_UNUSED_ENTRY(220), +	I40E_PTT_UNUSED_ENTRY(221), +	I40E_PTT_UNUSED_ENTRY(222), +	I40E_PTT_UNUSED_ENTRY(223), +	I40E_PTT_UNUSED_ENTRY(224), +	I40E_PTT_UNUSED_ENTRY(225), +	I40E_PTT_UNUSED_ENTRY(226), +	I40E_PTT_UNUSED_ENTRY(227), +	I40E_PTT_UNUSED_ENTRY(228), +	I40E_PTT_UNUSED_ENTRY(229), + +	I40E_PTT_UNUSED_ENTRY(230), +	I40E_PTT_UNUSED_ENTRY(231), +	I40E_PTT_UNUSED_ENTRY(232), +	I40E_PTT_UNUSED_ENTRY(233), +	I40E_PTT_UNUSED_ENTRY(234), +	I40E_PTT_UNUSED_ENTRY(235), +	I40E_PTT_UNUSED_ENTRY(236), +	I40E_PTT_UNUSED_ENTRY(237), +	I40E_PTT_UNUSED_ENTRY(238), +	I40E_PTT_UNUSED_ENTRY(239), + +	I40E_PTT_UNUSED_ENTRY(240), +	I40E_PTT_UNUSED_ENTRY(241), +	I40E_PTT_UNUSED_ENTRY(242), +	I40E_PTT_UNUSED_ENTRY(243), +	I40E_PTT_UNUSED_ENTRY(244), +	I40E_PTT_UNUSED_ENTRY(245), +	I40E_PTT_UNUSED_ENTRY(246), +	I40E_PTT_UNUSED_ENTRY(247), +	I40E_PTT_UNUSED_ENTRY(248), +	I40E_PTT_UNUSED_ENTRY(249), + +	I40E_PTT_UNUSED_ENTRY(250), +	I40E_PTT_UNUSED_ENTRY(251), +	I40E_PTT_UNUSED_ENTRY(252), +	I40E_PTT_UNUSED_ENTRY(253), +	I40E_PTT_UNUSED_ENTRY(254), +	I40E_PTT_UNUSED_ENTRY(255) +}; + + +/**   * i40e_init_shared_code - Initialize the shared code   * @hw: pointer to hardware structure   * @@ -142,14 +547,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)  	i40e_status status = 0;  	u32 reg; -	hw->phy.get_link_info = true; - -	/* Determine port number */ -	reg = rd32(hw, I40E_PFGEN_PORTNUM); -	reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> -	       I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); -	hw->port = (u8)reg; -  	
i40e_set_mac_type(hw);  	switch (hw->mac.type) { @@ -160,6 +557,21 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)  		break;  	} +	hw->phy.get_link_info = true; + +	/* Determine port number */ +	reg = rd32(hw, I40E_PFGEN_PORTNUM); +	reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> +	       I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); +	hw->port = (u8)reg; + +	/* Determine the PF number based on the PCI fn */ +	reg = rd32(hw, I40E_GLPCI_CAPSUP); +	if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK) +		hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func); +	else +		hw->pf_id = (u8)hw->bus.func; +  	status = i40e_init_nvm(hw);  	return status;  } @@ -210,8 +622,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_mac_address_write);  	cmd_data->command_flags = cpu_to_le16(flags); -	memcpy(&cmd_data->mac_sal, &mac_addr[0], 4); -	memcpy(&cmd_data->mac_sah, &mac_addr[4], 2); +	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]); +	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) | +					((u32)mac_addr[3] << 16) | +					((u32)mac_addr[4] << 8) | +					mac_addr[5]);  	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -240,32 +655,83 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)  }  /** - * i40e_validate_mac_addr - Validate MAC address - * @mac_addr: pointer to MAC address + * i40e_pre_tx_queue_cfg - pre tx queue configure + * @hw: pointer to the HW structure + * @queue: target pf queue index + * @enable: state change request   * - * Tests a MAC address to ensure it is a valid Individual Address + * Handles hw requirement to indicate intention to enable + * or disable target queue.   
**/ -i40e_status i40e_validate_mac_addr(u8 *mac_addr) +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)  { -	i40e_status status = 0; +	u32 abs_queue_idx = hw->func_caps.base_queue + queue; +	u32 reg_block = 0; +	u32 reg_val; -	/* Make sure it is not a multicast address */ -	if (I40E_IS_MULTICAST(mac_addr)) { -		hw_dbg(hw, "MAC address is multicast\n"); -		status = I40E_ERR_INVALID_MAC_ADDR; -	/* Not a broadcast address */ -	} else if (I40E_IS_BROADCAST(mac_addr)) { -		hw_dbg(hw, "MAC address is broadcast\n"); -		status = I40E_ERR_INVALID_MAC_ADDR; -	/* Reject the zero address */ -	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && -		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { -		hw_dbg(hw, "MAC address is all zeros\n"); -		status = I40E_ERR_INVALID_MAC_ADDR; +	if (abs_queue_idx >= 128) +		reg_block = abs_queue_idx / 128; + +	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block)); +	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK; +	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT); + +	if (enable) +		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK; +	else +		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK; + +	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val); +} + +/** + * i40e_get_media_type - Gets media type + * @hw: pointer to the hardware structure + **/ +static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) +{ +	enum i40e_media_type media; + +	switch (hw->phy.link_info.phy_type) { +	case I40E_PHY_TYPE_10GBASE_SR: +	case I40E_PHY_TYPE_10GBASE_LR: +	case I40E_PHY_TYPE_40GBASE_SR4: +	case I40E_PHY_TYPE_40GBASE_LR4: +		media = I40E_MEDIA_TYPE_FIBER; +		break; +	case I40E_PHY_TYPE_100BASE_TX: +	case I40E_PHY_TYPE_1000BASE_T: +	case I40E_PHY_TYPE_10GBASE_T: +		media = I40E_MEDIA_TYPE_BASET; +		break; +	case I40E_PHY_TYPE_10GBASE_CR1_CU: +	case I40E_PHY_TYPE_40GBASE_CR4_CU: +	case I40E_PHY_TYPE_10GBASE_CR1: +	case I40E_PHY_TYPE_40GBASE_CR4: +	case I40E_PHY_TYPE_10GBASE_SFPP_CU: +		
media = I40E_MEDIA_TYPE_DA; +		break; +	case I40E_PHY_TYPE_1000BASE_KX: +	case I40E_PHY_TYPE_10GBASE_KX4: +	case I40E_PHY_TYPE_10GBASE_KR: +	case I40E_PHY_TYPE_40GBASE_KR4: +		media = I40E_MEDIA_TYPE_BACKPLANE; +		break; +	case I40E_PHY_TYPE_SGMII: +	case I40E_PHY_TYPE_XAUI: +	case I40E_PHY_TYPE_XFI: +	case I40E_PHY_TYPE_XLAUI: +	case I40E_PHY_TYPE_XLPPI: +	default: +		media = I40E_MEDIA_TYPE_UNKNOWN; +		break;  	} -	return status; + +	return media;  } +#define I40E_PF_RESET_WAIT_COUNT_A0	200 +#define I40E_PF_RESET_WAIT_COUNT	100  /**   * i40e_pf_reset - Reset the PF   * @hw: pointer to the hardware structure @@ -275,7 +741,8 @@ i40e_status i40e_validate_mac_addr(u8 *mac_addr)   **/  i40e_status i40e_pf_reset(struct i40e_hw *hw)  { -	u32 wait_cnt = 0; +	u32 cnt = 0; +	u32 cnt1 = 0;  	u32 reg = 0;  	u32 grst_del; @@ -285,7 +752,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)  	 */  	grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK  			>> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; -	for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) { +	for (cnt = 0; cnt < grst_del + 2; cnt++) {  		reg = rd32(hw, I40E_GLGEN_RSTAT);  		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))  			break; @@ -296,17 +763,37 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)  		return I40E_ERR_RESET_FAILED;  	} -	/* Determine the PF number based on the PCI fn */ -	hw->pf_id = (u8)hw->bus.func; +	/* Now Wait for the FW to be ready */ +	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) { +		reg = rd32(hw, I40E_GLNVM_ULD); +		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | +			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK); +		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | +			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) { +			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1); +			break; +		} +		usleep_range(10000, 20000); +	} +	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK | +		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) { +		hw_dbg(hw, "wait for FW Reset complete timedout\n"); +		
hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg); +		return I40E_ERR_RESET_FAILED; +	}  	/* If there was a Global Reset in progress when we got here,  	 * we don't need to do the PF Reset  	 */ -	if (!wait_cnt) { +	if (!cnt) { +		if (hw->revision_id == 0) +			cnt = I40E_PF_RESET_WAIT_COUNT_A0; +		else +			cnt = I40E_PF_RESET_WAIT_COUNT;  		reg = rd32(hw, I40E_PFGEN_CTRL);  		wr32(hw, I40E_PFGEN_CTRL,  		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK)); -		for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) { +		for (; cnt; cnt--) {  			reg = rd32(hw, I40E_PFGEN_CTRL);  			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))  				break; @@ -319,6 +806,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)  	}  	i40e_clear_pxe_mode(hw); +  	return 0;  } @@ -333,12 +821,53 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)  {  	u32 reg; +	if (i40e_check_asq_alive(hw)) +		i40e_aq_clear_pxe_mode(hw, NULL); +  	/* Clear single descriptor fetch/write-back mode */  	reg = rd32(hw, I40E_GLLAN_RCTL_0); -	wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); + +	if (hw->revision_id == 0) { +		/* As a work around clear PXE_MODE instead of setting it */ +		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK))); +	} else { +		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK)); +	}  }  /** + * i40e_led_is_mine - helper to find matching led + * @hw: pointer to the hw struct + * @idx: index into GPIO registers + * + * returns: 0 if no match, otherwise the value of the GPIO_CTL register + */ +static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) +{ +	u32 gpio_val = 0; +	u32 port; + +	if (!hw->func_caps.led[idx]) +		return 0; + +	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx)); +	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >> +		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; + +	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR +	 * if it is not our port then ignore +	 */ +	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) || +	    (port != hw->port)) +		
return 0; + +	return gpio_val; +} + +#define I40E_LED0 22 +#define I40E_LINK_ACTIVITY 0xC + +/**   * i40e_led_get - return current on/off mode   * @hw: pointer to the hw struct   * @@ -349,24 +878,20 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)   **/  u32 i40e_led_get(struct i40e_hw *hw)  { -	u32 gpio_val = 0;  	u32 mode = 0; -	u32 port;  	int i; -	for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) { -		if (!hw->func_caps.led[i]) -			continue; - -		gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i)); -		port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) -			>> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; +	/* as per the documentation GPIO 22-29 are the LED +	 * GPIO pins named LED0..LED7 +	 */ +	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { +		u32 gpio_val = i40e_led_is_mine(hw, i); -		if (port != hw->port) +		if (!gpio_val)  			continue; -		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) -				>> I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT; +		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> +			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;  		break;  	} @@ -376,57 +901,69 @@ u32 i40e_led_get(struct i40e_hw *hw)  /**   * i40e_led_set - set new on/off mode   * @hw: pointer to the hw struct - * @mode: 0=off, else on (see EAS for mode details) + * @mode: 0=off, 0xf=on (else see manual for mode details) + * @blink: true if the LED should blink when on, false if steady + * + * if this function is used to turn on the blink it should + * be used to disable the blink when restoring the original state.   
**/ -void i40e_led_set(struct i40e_hw *hw, u32 mode) +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)  { -	u32 gpio_val = 0; -	u32 led_mode = 0; -	u32 port;  	int i; -	for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) { -		if (!hw->func_caps.led[i]) -			continue; +	if (mode & 0xfffffff0) +		hw_dbg(hw, "invalid mode passed in %X\n", mode); -		gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i)); -		port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) -			>> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT; +	/* as per the documentation GPIO 22-29 are the LED +	 * GPIO pins named LED0..LED7 +	 */ +	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) { +		u32 gpio_val = i40e_led_is_mine(hw, i); -		if (port != hw->port) +		if (!gpio_val)  			continue; -		led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & -			    I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;  		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; -		gpio_val |= led_mode; +		/* this & is a bit of paranoia, but serves as a range check */ +		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & +			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); + +		if (mode == I40E_LINK_ACTIVITY) +			blink = false; + +		gpio_val |= (blink ? 1 : 0) << +			    I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT; +  		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val); +		break;  	}  }  /* Admin command wrappers */ +  /** - * i40e_aq_queue_shutdown + * i40e_aq_clear_pxe_mode   * @hw: pointer to the hw struct - * @unloading: is the driver unloading itself + * @cmd_details: pointer to command details structure or NULL   * - * Tell the Firmware that we're shutting down the AdminQ and whether - * or not the driver is unloading as well. 
+ * Tell the firmware that the driver is taking over from PXE   **/ -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, -					     bool unloading) +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, +				struct i40e_asq_cmd_details *cmd_details)  { -	struct i40e_aq_desc desc; -	struct i40e_aqc_queue_shutdown *cmd = -		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;  	i40e_status status; +	struct i40e_aq_desc desc; +	struct i40e_aqc_clear_pxe *cmd = +		(struct i40e_aqc_clear_pxe *)&desc.params.raw;  	i40e_fill_default_direct_cmd_desc(&desc, -					  i40e_aqc_opc_queue_shutdown); +					  i40e_aqc_opc_clear_pxe_mode); -	if (unloading) -		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING); -	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); +	cmd->rx_cnt = 0x2; + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);  	return status;  } @@ -490,15 +1027,23 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,  		goto aq_get_link_info_exit;  	/* save off old link status information */ -	memcpy(&hw->phy.link_info_old, hw_link_info, -	       sizeof(struct i40e_link_status)); +	hw->phy.link_info_old = *hw_link_info;  	/* update link status */  	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; +	hw->phy.media_type = i40e_get_media_type(hw);  	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;  	hw_link_info->link_info = resp->link_info;  	hw_link_info->an_info = resp->an_info;  	hw_link_info->ext_info = resp->ext_info; +	hw_link_info->loopback = resp->loopback; +	hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); +	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; + +	if (resp->config & I40E_AQ_CONFIG_CRC_ENA) +		hw_link_info->crc_enable = true; +	else +		hw_link_info->crc_enable = false;  	if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))  		hw_link_info->lse_enable = true; @@ -519,7 +1064,7 @@ aq_get_link_info_exit:  
/**   * i40e_aq_add_vsi   * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct   * @cmd_details: pointer to command details structure or NULL   *   * Add a VSI context to the hardware. @@ -545,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,  	cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);  	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); -	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) -		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);  	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,  				    sizeof(vsi_ctx->info), cmd_details); @@ -571,7 +1114,8 @@ aq_add_vsi_exit:   * @cmd_details: pointer to command details structure or NULL   **/  i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, -				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) +				u16 seid, bool set, +				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc;  	struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -665,7 +1209,7 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,  /**   * i40e_get_vsi_params - get VSI configuration info   * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct   * @cmd_details: pointer to command details structure or NULL   **/  i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, @@ -673,8 +1217,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc; -	struct i40e_aqc_switch_seid *cmd = -		(struct i40e_aqc_switch_seid *)&desc.params.raw; +	struct i40e_aqc_add_get_update_vsi *cmd = +		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;  	struct i40e_aqc_add_get_update_vsi_completion *resp =  		(struct i40e_aqc_add_get_update_vsi_completion *)  		&desc.params.raw; @@ -683,11 +1227,9 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,  	
i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_get_vsi_parameters); -	cmd->seid = cpu_to_le16(vsi_ctx->seid); +	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);  	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); -	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) -		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);  	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,  				    sizeof(vsi_ctx->info), NULL); @@ -707,7 +1249,7 @@ aq_get_vsi_params_exit:  /**   * i40e_aq_update_vsi_params   * @hw: pointer to the hw struct - * @vsi: pointer to a vsi context struct + * @vsi_ctx: pointer to a vsi context struct   * @cmd_details: pointer to command details structure or NULL   *   * Update a VSI context. @@ -717,17 +1259,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc; -	struct i40e_aqc_switch_seid *cmd = -		(struct i40e_aqc_switch_seid *)&desc.params.raw; +	struct i40e_aqc_add_get_update_vsi *cmd = +		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;  	i40e_status status;  	i40e_fill_default_direct_cmd_desc(&desc,  					  i40e_aqc_opc_update_vsi_parameters); -	cmd->seid = cpu_to_le16(vsi_ctx->seid); +	cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);  	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); -	if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF) -		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);  	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,  				    sizeof(vsi_ctx->info), cmd_details); @@ -810,7 +1350,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,  /**   * i40e_aq_send_driver_version   * @hw: pointer to the hw struct - * @event: driver event: driver ok, start or stop   * @dv: driver's major, minor version   * @cmd_details: pointer to command details structure or NULL   * @@ -824,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,  	struct i40e_aqc_driver_version *cmd =  		(struct 
i40e_aqc_driver_version *)&desc.params.raw;  	i40e_status status; +	u16 len;  	if (dv == NULL)  		return I40E_ERR_PARAM; @@ -835,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,  	cmd->driver_minor_ver = dv->minor_version;  	cmd->driver_build_ver = dv->build_version;  	cmd->driver_subbuild_ver = dv->subbuild_version; -	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	len = 0; +	while (len < sizeof(dv->driver_string) && +	       (dv->driver_string[len] < 0x80) && +	       dv->driver_string[len]) +		len++; +	status = i40e_asq_send_command(hw, &desc, dv->driver_string, +				       len, cmd_details);  	return status;  } @@ -873,6 +1420,7 @@ i40e_get_link_status_exit:   * @downlink_seid: the VSI SEID   * @enabled_tc: bitmap of TCs to be enabled   * @default_port: true for default port VSI, false for control port + * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support   * @veb_seid: pointer to where to put the resulting VEB SEID   * @cmd_details: pointer to command details structure or NULL   * @@ -881,7 +1429,8 @@ i40e_get_link_status_exit:   **/  i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,  				u16 downlink_seid, u8 enabled_tc, -				bool default_port, u16 *veb_seid, +				bool default_port, bool enable_l2_filtering, +				u16 *veb_seid,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aq_desc desc; @@ -907,6 +1456,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,  		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;  	else  		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; + +	if (enable_l2_filtering) +		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; +  	cmd->veb_flags = cpu_to_le16(veb_flags);  	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -922,10 +1475,10 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,   * @hw: pointer to the hw struct   * @veb_seid: the SEID of the VEB to query   
* @switch_id: the uplink switch id - * @floating_veb: set to true if the VEB is floating + * @floating: set to true if the VEB is floating   * @statistic_index: index of the stats counter block for this VEB   * @vebs_used: number of VEB's used by function - * @vebs_unallocated: total VEB's not reserved by any function + * @vebs_free: total VEB's not reserved by any function   * @cmd_details: pointer to command details structure or NULL   *   * This retrieves the parameters for a particular VEB, specified by @@ -1059,89 +1612,11 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,  }  /** - * i40e_aq_add_vlan - Add VLAN ids to the HW filtering - * @hw: pointer to the hw struct - * @seid: VSI for the vlan filters - * @v_list: list of vlan filters to be added - * @count: length of the list - * @cmd_details: pointer to command details structure or NULL - **/ -i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid, -			struct i40e_aqc_add_remove_vlan_element_data *v_list, -			u8 count, struct i40e_asq_cmd_details *cmd_details) -{ -	struct i40e_aq_desc desc; -	struct i40e_aqc_macvlan *cmd = -		(struct i40e_aqc_macvlan *)&desc.params.raw; -	i40e_status status; -	u16 buf_size; - -	if (count == 0 || !v_list || !hw) -		return I40E_ERR_PARAM; - -	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data); - -	/* prep the rest of the request */ -	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan); -	cmd->num_addresses = cpu_to_le16(count); -	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); -	cmd->seid[1] = 0; -	cmd->seid[2] = 0; - -	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); -	if (buf_size > I40E_AQ_LARGE_BUF) -		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - -	status = i40e_asq_send_command(hw, &desc, v_list, buf_size, -				       cmd_details); - -	return status; -} - -/** - * i40e_aq_remove_vlan - Remove VLANs from the HW filtering - * @hw: pointer to the hw struct - * @seid: VSI 
for the vlan filters - * @v_list: list of macvlans to be removed - * @count: length of the list - * @cmd_details: pointer to command details structure or NULL - **/ -i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid, -			struct i40e_aqc_add_remove_vlan_element_data *v_list, -			u8 count, struct i40e_asq_cmd_details *cmd_details) -{ -	struct i40e_aq_desc desc; -	struct i40e_aqc_macvlan *cmd = -		(struct i40e_aqc_macvlan *)&desc.params.raw; -	i40e_status status; -	u16 buf_size; - -	if (count == 0 || !v_list || !hw) -		return I40E_ERR_PARAM; - -	buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data); - -	/* prep the rest of the request */ -	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan); -	cmd->num_addresses = cpu_to_le16(count); -	cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID); -	cmd->seid[1] = 0; -	cmd->seid[2] = 0; - -	desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); -	if (buf_size > I40E_AQ_LARGE_BUF) -		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - -	status = i40e_asq_send_command(hw, &desc, v_list, buf_size, -				       cmd_details); - -	return status; -} - -/**   * i40e_aq_send_msg_to_vf   * @hw: pointer to the hardware structure   * @vfid: vf id to send msg + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code   * @msg: pointer to the msg buffer   * @msglen: msg length   * @cmd_details: pointer to command details @@ -1371,9 +1846,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,  	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;  	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) -		p = (struct i40e_hw_capabilities *)&hw->dev_caps; +		p = &hw->dev_caps;  	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) -		p = (struct i40e_hw_capabilities *)&hw->func_caps; +		p = &hw->func_caps;  	else  		return; @@ -1496,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct 
i40e_hw *hw, void *buff,  		}  	} +	/* Software override ensuring FCoE is disabled if npar or mfp +	 * mode because it is not supported in these modes. +	 */ +	if (p->npar_enable || p->mfp_mode_1) +		p->fcoe = false; +  	/* additional HW specific goodies that might  	 * someday be HW version specific  	 */ @@ -1519,8 +2000,8 @@ i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details)  {  	struct i40e_aqc_list_capabilites *cmd; -	i40e_status status = 0;  	struct i40e_aq_desc desc; +	i40e_status status = 0;  	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; @@ -1681,6 +2162,63 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,  }  /** + * i40e_aq_add_udp_tunnel + * @hw: pointer to the hw struct + * @udp_port: the UDP port to add + * @header_len: length of the tunneling header length in DWords + * @protocol_index: protocol index type + * @filter_index: pointer to filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, +				u16 udp_port, u8 protocol_index, +				u8 *filter_index, +				struct i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	struct i40e_aqc_add_udp_tunnel *cmd = +		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; +	struct i40e_aqc_del_udp_tunnel_completion *resp = +		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; +	i40e_status status; + +	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); + +	cmd->udp_port = cpu_to_le16(udp_port); +	cmd->protocol_type = protocol_index; + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	if (!status) +		*filter_index = resp->index; + +	return status; +} + +/** + * i40e_aq_del_udp_tunnel + * @hw: pointer to the hw struct + * @index: filter index + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, +				struct 
i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	struct i40e_aqc_remove_udp_tunnel *cmd = +		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; +	i40e_status status; + +	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); + +	cmd->index = index; + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	return status; +} + +/**   * i40e_aq_delete_element - Delete switch element   * @hw: pointer to the hw struct   * @seid: the SEID to delete from the switch @@ -1709,6 +2247,28 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,  }  /** + * i40e_aq_dcb_updated - DCB Updated Command + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * + * EMP will return when the shared RPB settings have been + * recomputed and modified. The retval field in the descriptor + * will be set to 0 when RPB is modified. + **/ +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, +				struct i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	i40e_status status; + +	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	return status; +} + +/**   * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler   * @hw: pointer to the hw struct   * @seid: seid for the physical port/switching component/vsi @@ -1770,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,  }  /** + * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit + * @hw: pointer to the hw struct + * @seid: VSI seid + * @credit: BW limit credits (0 = disabled) + * @max_credit: Max BW limit credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, +				u16 seid, u16 credit, u8 max_credit, +				struct i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	struct 
i40e_aqc_configure_vsi_bw_limit *cmd = +		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; +	i40e_status status; + +	i40e_fill_default_direct_cmd_desc(&desc, +					  i40e_aqc_opc_configure_vsi_bw_limit); + +	cmd->vsi_seid = cpu_to_le16(seid); +	cmd->credit = cpu_to_le16(credit); +	cmd->max_credit = max_credit; + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	return status; +} + +/**   * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC   * @hw: pointer to the hw struct   * @seid: VSI seid @@ -1787,6 +2376,40 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,  }  /** + * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port + * @hw: pointer to the hw struct + * @seid: seid of the switching component connected to Physical Port + * @ets_data: Buffer holding ETS parameters + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, +		u16 seid, +		struct i40e_aqc_configure_switching_comp_ets_data *ets_data, +		enum i40e_admin_queue_opc opcode, +		struct i40e_asq_cmd_details *cmd_details) +{ +	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, +				    sizeof(*ets_data), opcode, cmd_details); +} + +/** + * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC + * @hw: pointer to the hw struct + * @seid: seid of the switching component + * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, +	u16 seid, +	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, +	struct i40e_asq_cmd_details *cmd_details) +{ +	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), +			    i40e_aqc_opc_configure_switching_comp_bw_config, +			    cmd_details); +} + +/**   * i40e_aq_query_vsi_bw_config - Query VSI BW configuration   * @hw: 
pointer to the hw struct   * @seid: seid of the VSI @@ -1888,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,  {  	u32 fcoe_cntx_size, fcoe_filt_size;  	u32 pe_cntx_size, pe_filt_size; -	u32 fcoe_fmax, pe_fmax; +	u32 fcoe_fmax;  	u32 val;  	/* Validate FCoE settings passed */ @@ -1963,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,  	if (fcoe_filt_size + fcoe_cntx_size >  fcoe_fmax)  		return I40E_ERR_INVALID_SIZE; -	/* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */ -	val = rd32(hw, I40E_GLHMC_PEXFMAX); -	pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK) -		   >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT; -	if (pe_filt_size + pe_cntx_size >  pe_fmax) -		return I40E_ERR_INVALID_SIZE; -  	return 0;  } @@ -2039,3 +2655,110 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,  	return 0;  } + +/** + * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter + * @hw: pointer to the hw struct + * @mac_addr: MAC address to use in the filter + * @ethtype: Ethertype to use in the filter + * @flags: Flags that needs to be applied to the filter + * @vsi_seid: seid of the control VSI + * @queue: VSI queue number to send the packet to + * @is_add: Add control packet filter if True else remove + * @stats: Structure to hold information on control filter counts + * @cmd_details: pointer to command details structure or NULL + * + * This command will Add or Remove control packet filter for a control VSI. + * In return it will update the total number of perfect filter count in + * the stats member. 
+ **/ +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, +				u8 *mac_addr, u16 ethtype, u16 flags, +				u16 vsi_seid, u16 queue, bool is_add, +				struct i40e_control_filter_stats *stats, +				struct i40e_asq_cmd_details *cmd_details) +{ +	struct i40e_aq_desc desc; +	struct i40e_aqc_add_remove_control_packet_filter *cmd = +		(struct i40e_aqc_add_remove_control_packet_filter *) +		&desc.params.raw; +	struct i40e_aqc_add_remove_control_packet_filter_completion *resp = +		(struct i40e_aqc_add_remove_control_packet_filter_completion *) +		&desc.params.raw; +	i40e_status status; + +	if (vsi_seid == 0) +		return I40E_ERR_PARAM; + +	if (is_add) { +		i40e_fill_default_direct_cmd_desc(&desc, +				i40e_aqc_opc_add_control_packet_filter); +		cmd->queue = cpu_to_le16(queue); +	} else { +		i40e_fill_default_direct_cmd_desc(&desc, +				i40e_aqc_opc_remove_control_packet_filter); +	} + +	if (mac_addr) +		memcpy(cmd->mac, mac_addr, ETH_ALEN); + +	cmd->etype = cpu_to_le16(ethtype); +	cmd->flags = cpu_to_le16(flags); +	cmd->seid = cpu_to_le16(vsi_seid); + +	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + +	if (!status && stats) { +		stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); +		stats->etype_used = le16_to_cpu(resp->etype_used); +		stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); +		stats->etype_free = le16_to_cpu(resp->etype_free); +	} + +	return status; +} + +/** + * i40e_set_pci_config_data - store PCI bus info + * @hw: pointer to hardware structure + * @link_status: the link status word from PCI config space + * + * Stores the PCI bus info (speed, width, type) within the i40e_hw structure + **/ +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) +{ +	hw->bus.type = i40e_bus_type_pci_express; + +	switch (link_status & PCI_EXP_LNKSTA_NLW) { +	case PCI_EXP_LNKSTA_NLW_X1: +		hw->bus.width = i40e_bus_width_pcie_x1; +		break; +	case PCI_EXP_LNKSTA_NLW_X2: +		hw->bus.width = i40e_bus_width_pcie_x2; +		
break; +	case PCI_EXP_LNKSTA_NLW_X4: +		hw->bus.width = i40e_bus_width_pcie_x4; +		break; +	case PCI_EXP_LNKSTA_NLW_X8: +		hw->bus.width = i40e_bus_width_pcie_x8; +		break; +	default: +		hw->bus.width = i40e_bus_width_unknown; +		break; +	} + +	switch (link_status & PCI_EXP_LNKSTA_CLS) { +	case PCI_EXP_LNKSTA_CLS_2_5GB: +		hw->bus.speed = i40e_bus_speed_2500; +		break; +	case PCI_EXP_LNKSTA_CLS_5_0GB: +		hw->bus.speed = i40e_bus_speed_5000; +		break; +	case PCI_EXP_LNKSTA_CLS_8_0GB: +		hw->bus.speed = i40e_bus_speed_8000; +		break; +	default: +		hw->bus.speed = i40e_bus_speed_unknown; +		break; +	} +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c new file mode 100644 index 00000000000..036570d7617 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -0,0 +1,472 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e_adminq.h" +#include "i40e_prototype.h" +#include "i40e_dcb.h" + +/** + * i40e_get_dcbx_status + * @hw: pointer to the hw struct + * @status: Embedded DCBX Engine Status + * + * Get the DCBX status from the Firmware + **/ +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) +{ +	u32 reg; + +	if (!status) +		return I40E_ERR_PARAM; + +	reg = rd32(hw, I40E_PRTDCB_GENS); +	*status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >> +			I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT); + +	return 0; +} + +/** + * i40e_parse_ieee_etscfg_tlv + * @tlv: IEEE 802.1Qaz ETS CFG TLV + * @dcbcfg: Local store to update ETS CFG data + * + * Parses IEEE 802.1Qaz ETS CFG TLV + **/ +static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, +				       struct i40e_dcbx_config *dcbcfg) +{ +	struct i40e_ieee_ets_config *etscfg; +	u8 *buf = tlv->tlvinfo; +	u16 offset = 0; +	u8 priority; +	int i; + +	/* First Octet post subtype +	 * -------------------------- +	 * |will-|CBS  | Re-  | Max | +	 * |ing  |     |served| TCs | +	 * -------------------------- +	 * |1bit | 1bit|3 bits|3bits| +	 */ +	etscfg = &dcbcfg->etscfg; +	etscfg->willing = (u8)((buf[offset] & I40E_IEEE_ETS_WILLING_MASK) >> +			       I40E_IEEE_ETS_WILLING_SHIFT); +	etscfg->cbs = (u8)((buf[offset] & I40E_IEEE_ETS_CBS_MASK) >> +			   I40E_IEEE_ETS_CBS_SHIFT); +	etscfg->maxtcs = (u8)((buf[offset] & I40E_IEEE_ETS_MAXTC_MASK) >> +			      I40E_IEEE_ETS_MAXTC_SHIFT); + +	/* Move offset to Priority Assignment Table */ +	offset++; + +	/* Priority Assignment Table (4 octets) +	 * Octets:|    1    |    2    |    3    |    4    | +	 *        ----------------------------------------- +	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| +	 *        ----------------------------------------- +	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0| +	 *        
----------------------------------------- +	 */ +	for (i = 0; i < 4; i++) { +		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> +				I40E_IEEE_ETS_PRIO_1_SHIFT); +		etscfg->prioritytable[i * 2] =  priority; +		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> +				I40E_IEEE_ETS_PRIO_0_SHIFT); +		etscfg->prioritytable[i * 2 + 1] = priority; +		offset++; +	} + +	/* TC Bandwidth Table (8 octets) +	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | +	 *        --------------------------------- +	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| +	 *        --------------------------------- +	 */ +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) +		etscfg->tcbwtable[i] = buf[offset++]; + +	/* TSA Assignment Table (8 octets) +	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | +	 *        --------------------------------- +	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| +	 *        --------------------------------- +	 */ +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) +		etscfg->tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz ETS REC TLV + * @dcbcfg: Local store to update ETS REC data + * + * Parses IEEE 802.1Qaz ETS REC TLV + **/ +static void i40e_parse_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv, +				       struct i40e_dcbx_config *dcbcfg) +{ +	u8 *buf = tlv->tlvinfo; +	u16 offset = 0; +	u8 priority; +	int i; + +	/* Move offset to priority table */ +	offset++; + +	/* Priority Assignment Table (4 octets) +	 * Octets:|    1    |    2    |    3    |    4    | +	 *        ----------------------------------------- +	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| +	 *        ----------------------------------------- +	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0| +	 *        ----------------------------------------- +	 */ +	for (i = 0; i < 4; i++) { +		priority = (u8)((buf[offset] & I40E_IEEE_ETS_PRIO_1_MASK) >> +				I40E_IEEE_ETS_PRIO_1_SHIFT); +		dcbcfg->etsrec.prioritytable[i*2] =  priority; +		priority = 
(u8)((buf[offset] & I40E_IEEE_ETS_PRIO_0_MASK) >> +				I40E_IEEE_ETS_PRIO_0_SHIFT); +		dcbcfg->etsrec.prioritytable[i*2 + 1] = priority; +		offset++; +	} + +	/* TC Bandwidth Table (8 octets) +	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | +	 *        --------------------------------- +	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| +	 *        --------------------------------- +	 */ +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) +		dcbcfg->etsrec.tcbwtable[i] = buf[offset++]; + +	/* TSA Assignment Table (8 octets) +	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | +	 *        --------------------------------- +	 *        |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| +	 *        --------------------------------- +	 */ +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) +		dcbcfg->etsrec.tsatable[i] = buf[offset++]; +} + +/** + * i40e_parse_ieee_pfccfg_tlv + * @tlv: IEEE 802.1Qaz PFC CFG TLV + * @dcbcfg: Local store to update PFC CFG data + * + * Parses IEEE 802.1Qaz PFC CFG TLV + **/ +static void i40e_parse_ieee_pfccfg_tlv(struct i40e_lldp_org_tlv *tlv, +				       struct i40e_dcbx_config *dcbcfg) +{ +	u8 *buf = tlv->tlvinfo; + +	/* ---------------------------------------- +	 * |will-|MBC  | Re-  | PFC |  PFC Enable  | +	 * |ing  |     |served| cap |              | +	 * ----------------------------------------- +	 * |1bit | 1bit|2 bits|4bits| 1 octet      | +	 */ +	dcbcfg->pfc.willing = (u8)((buf[0] & I40E_IEEE_PFC_WILLING_MASK) >> +				   I40E_IEEE_PFC_WILLING_SHIFT); +	dcbcfg->pfc.mbc = (u8)((buf[0] & I40E_IEEE_PFC_MBC_MASK) >> +			       I40E_IEEE_PFC_MBC_SHIFT); +	dcbcfg->pfc.pfccap = (u8)((buf[0] & I40E_IEEE_PFC_CAP_MASK) >> +				  I40E_IEEE_PFC_CAP_SHIFT); +	dcbcfg->pfc.pfcenable = buf[1]; +} + +/** + * i40e_parse_ieee_app_tlv + * @tlv: IEEE 802.1Qaz APP TLV + * @dcbcfg: Local store to update APP PRIO data + * + * Parses IEEE 802.1Qaz APP PRIO TLV + **/ +static void i40e_parse_ieee_app_tlv(struct i40e_lldp_org_tlv *tlv, +				    struct i40e_dcbx_config *dcbcfg) +{ +	u16 typelength; +	
u16 offset = 0; +	u16 length; +	int i = 0; +	u8 *buf; + +	typelength = ntohs(tlv->typelength); +	length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> +		       I40E_LLDP_TLV_LEN_SHIFT); +	buf = tlv->tlvinfo; + +	/* The App priority table starts 5 octets after TLV header */ +	length -= (sizeof(tlv->ouisubtype) + 1); + +	/* Move offset to App Priority Table */ +	offset++; + +	/* Application Priority Table (3 octets) +	 * Octets:|         1          |    2    |    3    | +	 *        ----------------------------------------- +	 *        |Priority|Rsrvd| Sel |    Protocol ID    | +	 *        ----------------------------------------- +	 *   Bits:|23    21|20 19|18 16|15                0| +	 *        ----------------------------------------- +	 */ +	while (offset < length) { +		dcbcfg->app[i].priority = (u8)((buf[offset] & +						I40E_IEEE_APP_PRIO_MASK) >> +					       I40E_IEEE_APP_PRIO_SHIFT); +		dcbcfg->app[i].selector = (u8)((buf[offset] & +						I40E_IEEE_APP_SEL_MASK) >> +					       I40E_IEEE_APP_SEL_SHIFT); +		dcbcfg->app[i].protocolid = (buf[offset + 1] << 0x8) | +					     buf[offset + 2]; +		/* Move to next app */ +		offset += 3; +		i++; +		if (i >= I40E_DCBX_MAX_APPS) +			break; +	} + +	dcbcfg->numapps = i; +} + +/** + * i40e_parse_ieee_etsrec_tlv + * @tlv: IEEE 802.1Qaz TLV + * @dcbcfg: Local store to update ETS REC data + * + * Get the TLV subtype and send it to parsing function + * based on the subtype value + **/ +static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv, +				struct i40e_dcbx_config *dcbcfg) +{ +	u32 ouisubtype; +	u8 subtype; + +	ouisubtype = ntohl(tlv->ouisubtype); +	subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >> +		       I40E_LLDP_TLV_SUBTYPE_SHIFT); +	switch (subtype) { +	case I40E_IEEE_SUBTYPE_ETS_CFG: +		i40e_parse_ieee_etscfg_tlv(tlv, dcbcfg); +		break; +	case I40E_IEEE_SUBTYPE_ETS_REC: +		i40e_parse_ieee_etsrec_tlv(tlv, dcbcfg); +		break; +	case I40E_IEEE_SUBTYPE_PFC_CFG: +		i40e_parse_ieee_pfccfg_tlv(tlv, 
dcbcfg); +		break; +	case I40E_IEEE_SUBTYPE_APP_PRI: +		i40e_parse_ieee_app_tlv(tlv, dcbcfg); +		break; +	default: +		break; +	} +} + +/** + * i40e_parse_org_tlv + * @tlv: Organization specific TLV + * @dcbcfg: Local store to update ETS REC data + * + * Currently only IEEE 802.1Qaz TLV is supported, all others + * will be returned + **/ +static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv, +			       struct i40e_dcbx_config *dcbcfg) +{ +	u32 ouisubtype; +	u32 oui; + +	ouisubtype = ntohl(tlv->ouisubtype); +	oui = (u32)((ouisubtype & I40E_LLDP_TLV_OUI_MASK) >> +		    I40E_LLDP_TLV_OUI_SHIFT); +	switch (oui) { +	case I40E_IEEE_8021QAZ_OUI: +		i40e_parse_ieee_tlv(tlv, dcbcfg); +		break; +	default: +		break; +	} +} + +/** + * i40e_lldp_to_dcb_config + * @lldpmib: LLDPDU to be parsed + * @dcbcfg: store for LLDPDU data + * + * Parse DCB configuration from the LLDPDU + **/ +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, +				    struct i40e_dcbx_config *dcbcfg) +{ +	i40e_status ret = 0; +	struct i40e_lldp_org_tlv *tlv; +	u16 type; +	u16 length; +	u16 typelength; +	u16 offset = 0; + +	if (!lldpmib || !dcbcfg) +		return I40E_ERR_PARAM; + +	/* set to the start of LLDPDU */ +	lldpmib += ETH_HLEN; +	tlv = (struct i40e_lldp_org_tlv *)lldpmib; +	while (1) { +		typelength = ntohs(tlv->typelength); +		type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> +			     I40E_LLDP_TLV_TYPE_SHIFT); +		length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> +			       I40E_LLDP_TLV_LEN_SHIFT); +		offset += sizeof(typelength) + length; + +		/* END TLV or beyond LLDPDU size */ +		if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE)) +			break; + +		switch (type) { +		case I40E_TLV_TYPE_ORG: +			i40e_parse_org_tlv(tlv, dcbcfg); +			break; +		default: +			break; +		} + +		/* Move to next TLV */ +		tlv = (struct i40e_lldp_org_tlv *)((char *)tlv + +						    sizeof(tlv->typelength) + +						    length); +	} + +	return ret; +} + +/** + * i40e_aq_get_dcb_config + * @hw: pointer 
to the hw struct + * @mib_type: mib type for the query + * @bridgetype: bridge type for the query (remote) + * @dcbcfg: store for LLDPDU data + * + * Query DCB configuration from the Firmware + **/ +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, +				   u8 bridgetype, +				   struct i40e_dcbx_config *dcbcfg) +{ +	i40e_status ret = 0; +	struct i40e_virt_mem mem; +	u8 *lldpmib; + +	/* Allocate the LLDPDU */ +	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE); +	if (ret) +		return ret; + +	lldpmib = (u8 *)mem.va; +	ret = i40e_aq_get_lldp_mib(hw, bridgetype, mib_type, +				   (void *)lldpmib, I40E_LLDPDU_SIZE, +				   NULL, NULL, NULL); +	if (ret) +		goto free_mem; + +	/* Parse LLDP MIB to get dcb configuration */ +	ret = i40e_lldp_to_dcb_config(lldpmib, dcbcfg); + +free_mem: +	i40e_free_virt_mem(hw, &mem); +	return ret; +} + +/** + * i40e_get_dcb_config + * @hw: pointer to the hw struct + * + * Get DCB configuration from the Firmware + **/ +i40e_status i40e_get_dcb_config(struct i40e_hw *hw) +{ +	i40e_status ret = 0; + +	/* Get Local DCB Config */ +	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, +				     &hw->local_dcbx_config); +	if (ret) +		goto out; + +	/* Get Remote DCB Config */ +	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, +				     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, +				     &hw->remote_dcbx_config); +out: +	return ret; +} + +/** + * i40e_init_dcb + * @hw: pointer to the hw struct + * + * Update DCB configuration from the Firmware + **/ +i40e_status i40e_init_dcb(struct i40e_hw *hw) +{ +	i40e_status ret = 0; + +	if (!hw->func_caps.dcb) +		return ret; + +	/* Get DCBX status */ +	ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); +	if (ret) +		return ret; + +	/* Check the DCBX Status */ +	switch (hw->dcbx_status) { +	case I40E_DCBX_STATUS_DONE: +	case I40E_DCBX_STATUS_IN_PROGRESS: +		/* Get current DCBX configuration */ +		ret = i40e_get_dcb_config(hw); +		break; +	case I40E_DCBX_STATUS_DISABLED: +		
return ret; +	case I40E_DCBX_STATUS_NOT_STARTED: +	case I40E_DCBX_STATUS_MULTIPLE_PEERS: +	default: +		break; +	} + +	/* Configure the LLDP MIB change event */ +	ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL); +	if (ret) +		return ret; + +	return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h new file mode 100644 index 00000000000..34cf1c30c7f --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -0,0 +1,107 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _I40E_DCB_H_ +#define _I40E_DCB_H_ + +#include "i40e_type.h" + +#define I40E_DCBX_STATUS_NOT_STARTED	0 +#define I40E_DCBX_STATUS_IN_PROGRESS	1 +#define I40E_DCBX_STATUS_DONE		2 +#define I40E_DCBX_STATUS_MULTIPLE_PEERS	3 +#define I40E_DCBX_STATUS_DISABLED	7 + +#define I40E_TLV_TYPE_END		0 +#define I40E_TLV_TYPE_ORG		127 + +#define I40E_IEEE_8021QAZ_OUI		0x0080C2 +#define I40E_IEEE_SUBTYPE_ETS_CFG	9 +#define I40E_IEEE_SUBTYPE_ETS_REC	10 +#define I40E_IEEE_SUBTYPE_PFC_CFG	11 +#define I40E_IEEE_SUBTYPE_APP_PRI	12 + +/* Defines for LLDP TLV header */ +#define I40E_LLDP_TLV_LEN_SHIFT		0 +#define I40E_LLDP_TLV_LEN_MASK		(0x01FF << I40E_LLDP_TLV_LEN_SHIFT) +#define I40E_LLDP_TLV_TYPE_SHIFT	9 +#define I40E_LLDP_TLV_TYPE_MASK		(0x7F << I40E_LLDP_TLV_TYPE_SHIFT) +#define I40E_LLDP_TLV_SUBTYPE_SHIFT	0 +#define I40E_LLDP_TLV_SUBTYPE_MASK	(0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) +#define I40E_LLDP_TLV_OUI_SHIFT		8 +#define I40E_LLDP_TLV_OUI_MASK		(0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) + +/* Defines for IEEE ETS TLV */ +#define I40E_IEEE_ETS_MAXTC_SHIFT	0 +#define I40E_IEEE_ETS_MAXTC_MASK	(0x7 << I40E_IEEE_ETS_MAXTC_SHIFT) +#define I40E_IEEE_ETS_CBS_SHIFT		6 +#define I40E_IEEE_ETS_CBS_MASK		(0x1 << I40E_IEEE_ETS_CBS_SHIFT) +#define I40E_IEEE_ETS_WILLING_SHIFT	7 +#define I40E_IEEE_ETS_WILLING_MASK	(0x1 << I40E_IEEE_ETS_WILLING_SHIFT) +#define I40E_IEEE_ETS_PRIO_0_SHIFT	0 +#define I40E_IEEE_ETS_PRIO_0_MASK	(0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) +#define I40E_IEEE_ETS_PRIO_1_SHIFT	4 +#define I40E_IEEE_ETS_PRIO_1_MASK	(0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) + +/* Defines for IEEE TSA types */ +#define I40E_IEEE_TSA_STRICT		0 +#define I40E_IEEE_TSA_ETS		2 + +/* Defines for IEEE PFC TLV */ +#define I40E_IEEE_PFC_CAP_SHIFT		0 +#define I40E_IEEE_PFC_CAP_MASK		(0xF << I40E_IEEE_PFC_CAP_SHIFT) +#define I40E_IEEE_PFC_MBC_SHIFT		6 +#define I40E_IEEE_PFC_MBC_MASK		
(0x1 << I40E_IEEE_PFC_MBC_SHIFT) +#define I40E_IEEE_PFC_WILLING_SHIFT	7 +#define I40E_IEEE_PFC_WILLING_MASK	(0x1 << I40E_IEEE_PFC_WILLING_SHIFT) + +/* Defines for IEEE APP TLV */ +#define I40E_IEEE_APP_SEL_SHIFT		0 +#define I40E_IEEE_APP_SEL_MASK		(0x7 << I40E_IEEE_APP_SEL_SHIFT) +#define I40E_IEEE_APP_PRIO_SHIFT	5 +#define I40E_IEEE_APP_PRIO_MASK		(0x7 << I40E_IEEE_APP_PRIO_SHIFT) + + +#pragma pack(1) + +/* IEEE 802.1AB LLDP Organization specific TLV */ +struct i40e_lldp_org_tlv { +	__be16 typelength; +	__be32 ouisubtype; +	u8 tlvinfo[1]; +}; +#pragma pack() + +i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, +					   u16 *status); +i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib, +					      struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type, +					     u8 bridgetype, +					     struct i40e_dcbx_config *dcbcfg); +i40e_status i40e_get_dcb_config(struct i40e_hw *hw); +i40e_status i40e_init_dcb(struct i40e_hw *hw); +#endif /* _I40E_DCB_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c new file mode 100644 index 00000000000..00bc0cdb3a0 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -0,0 +1,316 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. 
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_I40E_DCB
+#include "i40e.h"
+#include <net/dcbnl.h>
+
+/**
+ * i40e_get_pfc_delay - retrieve PFC Link Delay
+ * @hw: pointer to hardware struct
+ * @delay: holds the PFC Link delay value
+ *
+ * Returns PFC Link Delay from the PRTDCB_GENC.PFCLDA
+ **/
+static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
+{
+	u32 val;
+
+	val = rd32(hw, I40E_PRTDCB_GENC);
+	/* mask first, then shift: '>>' binds tighter than '&' in C */
+	*delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
+		       I40E_PRTDCB_GENC_PFCLDA_SHIFT);
+}
+
+/**
+ * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Returns local IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_getets(struct net_device *dev,
+				  struct ieee_ets *ets)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	struct i40e_dcbx_config *dcbxcfg;
+	struct i40e_hw *hw = &pf->hw;
+
+	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	dcbxcfg = &hw->local_dcbx_config;
+	ets->willing = dcbxcfg->etscfg.willing;
+	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+	ets->cbs = dcbxcfg->etscfg.cbs;
+	memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
+		sizeof(ets->tc_tx_bw));
+	memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable,
+		sizeof(ets->tc_rx_bw));
+	memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable,
+		sizeof(ets->tc_tsa));
+	memcpy(ets->prio_tc, dcbxcfg->etscfg.prioritytable,
+		sizeof(ets->prio_tc));
+	memcpy(ets->tc_reco_bw,
dcbxcfg->etsrec.tcbwtable,
+		sizeof(ets->tc_reco_bw));
+	memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
+		sizeof(ets->tc_reco_tsa));
+	/* recommended priority table comes from etsrec, not etscfg,
+	 * matching the tc_reco_bw/tc_reco_tsa copies above
+	 */
+	memcpy(ets->reco_prio_tc, dcbxcfg->etsrec.prioritytable,
+		sizeof(ets->reco_prio_tc));
+
+	return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Returns local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
+				  struct ieee_pfc *pfc)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	struct i40e_dcbx_config *dcbxcfg;
+	struct i40e_hw *hw = &pf->hw;
+	int i;
+
+	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+		return -EINVAL;
+
+	dcbxcfg = &hw->local_dcbx_config;
+	pfc->pfc_cap = dcbxcfg->pfc.pfccap;
+	pfc->pfc_en = dcbxcfg->pfc.pfcenable;
+	pfc->mbc = dcbxcfg->pfc.mbc;
+	i40e_get_pfc_delay(hw, &pfc->delay);
+
+	/* Get Requests/Indications */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		pfc->requests[i] = pf->stats.priority_xoff_tx[i];
+		pfc->indications[i] = pf->stats.priority_xoff_rx[i];
+	}
+
+	return 0;
+}
+
+/**
+ * i40e_dcbnl_getdcbx - retrieve current DCBx capability
+ * @netdev: the corresponding netdev
+ *
+ * Returns DCBx capability features
+ **/
+static u8 i40e_dcbnl_getdcbx(struct net_device *dev)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+
+	return pf->dcbx_cap;
+}
+
+/**
+ * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx
+ * @netdev: the corresponding netdev
+ *
+ * Returns the SAN MAC address used for LLDP exchange
+ **/
+static void i40e_dcbnl_get_perm_hw_addr(struct net_device *dev,
+					u8 *perm_addr)
+{
+	struct i40e_pf *pf = i40e_netdev_to_pf(dev);
+	int i, j;
+
+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
+	for (i = 0; i < dev->addr_len; i++)
+		perm_addr[i] = pf->hw.mac.perm_addr[i];
+
+	for (j = 0; j < dev->addr_len; j++, i++)
+		perm_addr[i] = pf->hw.mac.san_addr[j];
+}
+
+static const struct dcbnl_rtnl_ops 
dcbnl_ops = { +	.ieee_getets	= i40e_dcbnl_ieee_getets, +	.ieee_getpfc	= i40e_dcbnl_ieee_getpfc, +	.getdcbx	= i40e_dcbnl_getdcbx, +	.getpermhwaddr  = i40e_dcbnl_get_perm_hw_addr, +}; + +/** + * i40e_dcbnl_set_all - set all the apps and ieee data from DCBx config + * @vsi: the corresponding vsi + * + * Set up all the IEEE APPs in the DCBNL App Table and generate event for + * other settings + **/ +void i40e_dcbnl_set_all(struct i40e_vsi *vsi) +{ +	struct net_device *dev = vsi->netdev; +	struct i40e_pf *pf = i40e_netdev_to_pf(dev); +	struct i40e_dcbx_config *dcbxcfg; +	struct i40e_hw *hw = &pf->hw; +	struct dcb_app sapp; +	u8 prio, tc_map; +	int i; + +	/* DCB not enabled */ +	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) +		return; + +	dcbxcfg = &hw->local_dcbx_config; + +	/* Set up all the App TLVs if DCBx is negotiated */ +	for (i = 0; i < dcbxcfg->numapps; i++) { +		prio = dcbxcfg->app[i].priority; +		tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]); + +		/* Add APP only if the TC is enabled for this VSI */ +		if (tc_map & vsi->tc_config.enabled_tc) { +			sapp.selector = dcbxcfg->app[i].selector; +			sapp.protocol = dcbxcfg->app[i].protocolid; +			sapp.priority = prio; +			dcb_ieee_setapp(dev, &sapp); +		} +	} + +	/* Notify user-space of the changes */ +	dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0); +} + +/** + * i40e_dcbnl_vsi_del_app - Delete APP for given VSI + * @vsi: the corresponding vsi + * @app: APP to delete + * + * Delete given APP from the DCBNL APP table for given + * VSI + **/ +static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, +				  struct i40e_ieee_app_priority_table *app) +{ +	struct net_device *dev = vsi->netdev; +	struct dcb_app sapp; + +	if (!dev) +		return -EINVAL; + +	sapp.selector = app->selector; +	sapp.protocol = app->protocolid; +	sapp.priority = app->priority; +	return dcb_ieee_delapp(dev, &sapp); +} + +/** + * i40e_dcbnl_del_app - Delete APP on all VSIs + * @pf: the corresponding pf + * @app: APP to delete + * + * 
Delete given APP from all the VSIs for given PF + **/ +static void i40e_dcbnl_del_app(struct i40e_pf *pf, +			      struct i40e_ieee_app_priority_table *app) +{ +	int v, err; +	for (v = 0; v < pf->num_alloc_vsi; v++) { +		if (pf->vsi[v] && pf->vsi[v]->netdev) { +			err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); +			if (err) +				dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", +					 __func__, pf->vsi[v]->seid, +					 err, app->selector, +					 app->protocolid, app->priority); +		} +	} +} + +/** + * i40e_dcbnl_find_app - Search APP in given DCB config + * @cfg: DCBX configuration data + * @app: APP to search for + * + * Find given APP in the DCB configuration + **/ +static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, +				struct i40e_ieee_app_priority_table *app) +{ +	int i; + +	for (i = 0; i < cfg->numapps; i++) { +		if (app->selector == cfg->app[i].selector && +		    app->protocolid == cfg->app[i].protocolid && +		    app->priority == cfg->app[i].priority) +			return true; +	} + +	return false; +} + +/** + * i40e_dcbnl_flush_apps - Delete all removed APPs + * @pf: the corresponding pf + * @new_cfg: new DCBX configuration data + * + * Find and delete all APPs that are not present in the passed + * DCB configuration + **/ +void i40e_dcbnl_flush_apps(struct i40e_pf *pf, +			   struct i40e_dcbx_config *new_cfg) +{ +	struct i40e_ieee_app_priority_table app; +	struct i40e_dcbx_config *dcbxcfg; +	struct i40e_hw *hw = &pf->hw; +	int i; + +	dcbxcfg = &hw->local_dcbx_config; +	for (i = 0; i < dcbxcfg->numapps; i++) { +		app = dcbxcfg->app[i]; +		/* The APP is not available anymore delete it */ +		if (!i40e_dcbnl_find_app(new_cfg, &app)) +			i40e_dcbnl_del_app(pf, &app); +	} +} + +/** + * i40e_dcbnl_setup - DCBNL setup + * @vsi: the corresponding vsi + * + * Set up DCBNL ops and initial APP TLVs + **/ +void i40e_dcbnl_setup(struct i40e_vsi *vsi) +{ +	struct net_device *dev = vsi->netdev; +	struct i40e_pf *pf = 
i40e_netdev_to_pf(dev); + +	/* Not DCB capable */ +	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) +		return; + +	/* Do not setup DCB NL ops for MFP mode */ +	if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) +		dev->dcbnl_ops = &dcbnl_ops; + +	/* Set initial IEEE DCB settings */ +	i40e_dcbnl_set_all(vsi); +} +#endif /* CONFIG_I40E_DCB */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8dbd91f64b7..cffdfc21290 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -46,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)  	if (seid < 0)  		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);  	else -		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +		for (i = 0; i < pf->num_alloc_vsi; i++)  			if (pf->vsi[i] && (pf->vsi[i]->seid == seid))  				return pf->vsi[i]; @@ -151,9 +150,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,  				   size_t count, loff_t *ppos)  {  	struct i40e_pf *pf = filp->private_data; -	char dump_request_buf[16];  	bool seid_found = false; -	int bytes_not_copied;  	long seid = -1;  	int buflen = 0;  	int i, ret; @@ -163,21 +160,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,  	/* don't allow partial writes */  	if (*ppos != 0)  		return 0; -	if (count >= sizeof(dump_request_buf)) -		return -ENOSPC; - -	bytes_not_copied = copy_from_user(dump_request_buf, buffer, count); -	if (bytes_not_copied < 0) -		return bytes_not_copied; -	if (bytes_not_copied > 0) -		count -= bytes_not_copied; -	dump_request_buf[count] = '\0';  	/* decode the SEID given to be dumped */ -	ret = kstrtol(dump_request_buf, 0, &seid); -	if (ret < 0) { -		dev_info(&pf->pdev->dev, "bad seid value '%s'\n", -			 dump_request_buf); +	ret = kstrtol_from_user(buffer, count, 0, &seid); + +	if (ret) { +		dev_info(&pf->pdev->dev, "bad seid value\n");  	} else if (seid == 0) {  		seid_found = true; @@ -203,12 +191,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,  			len = (sizeof(struct i40e_aq_desc)  					* pf->hw.aq.num_asq_entries); -			memcpy(p, pf->hw.aq.asq.desc, len); +			memcpy(p, pf->hw.aq.asq.desc_buf.va, len);  			p += len;  			len = (sizeof(struct i40e_aq_desc)  					* pf->hw.aq.num_arq_entries); -			memcpy(p, pf->hw.aq.arq.desc, len); +			memcpy(p, pf->hw.aq.arq.desc_buf.va, len);  			p += len;  			i40e_dbg_dump_data_len = buflen; @@ -245,26 +233,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,  			memcpy(p, vsi, len);  			p += len; -			len = (sizeof(struct 
i40e_q_vector) -				* vsi->num_q_vectors); -			memcpy(p, vsi->q_vectors, len); -			p += len; - -			len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs); -			memcpy(p, vsi->tx_rings, len); -			p += len; -			memcpy(p, vsi->rx_rings, len); -			p += len; +			if (vsi->num_q_vectors) { +				len = (sizeof(struct i40e_q_vector) +					* vsi->num_q_vectors); +				memcpy(p, vsi->q_vectors, len); +				p += len; +			} -			for (i = 0; i < vsi->num_queue_pairs; i++) { -				len = sizeof(struct i40e_tx_buffer); -				memcpy(p, vsi->tx_rings[i].tx_bi, len); +			if (vsi->num_queue_pairs) { +				len = (sizeof(struct i40e_ring) * +				      vsi->num_queue_pairs); +				memcpy(p, vsi->tx_rings, len); +				p += len; +				memcpy(p, vsi->rx_rings, len);  				p += len;  			} -			for (i = 0; i < vsi->num_queue_pairs; i++) { + +			if (vsi->tx_rings[0]) { +				len = sizeof(struct i40e_tx_buffer); +				for (i = 0; i < vsi->num_queue_pairs; i++) { +					memcpy(p, vsi->tx_rings[i]->tx_bi, len); +					p += len; +				}  				len = sizeof(struct i40e_rx_buffer); -				memcpy(p, vsi->rx_rings[i].rx_bi, len); -				p += len; +				for (i = 0; i < vsi->num_queue_pairs; i++) { +					memcpy(p, vsi->rx_rings[i]->rx_bi, len); +					p += len; +				}  			}  			/* macvlan filter list */ @@ -366,7 +361,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,  }  /** - * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into pokem datum + * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum   * @pf: the i40e_pf created in command write   * @seid: the seid the user put in   **/ @@ -484,100 +479,105 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)  		 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",  		 vsi->tx_restart, vsi->tx_busy,  		 vsi->rx_buf_failed, vsi->rx_page_failed); -	if (vsi->rx_rings) { -		for (i = 0; i < vsi->num_queue_pairs; i++) { -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: desc = %p\n", -			
	 i, vsi->rx_rings[i].desc); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", -				 i, vsi->rx_rings[i].dev, -				 vsi->rx_rings[i].netdev, -				 vsi->rx_rings[i].rx_bi); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", -				 i, vsi->rx_rings[i].state, -				 vsi->rx_rings[i].queue_index, -				 vsi->rx_rings[i].reg_idx); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", -				 i, vsi->rx_rings[i].rx_hdr_len, -				 vsi->rx_rings[i].rx_buf_len, -				 vsi->rx_rings[i].dtype); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", -				 i, vsi->rx_rings[i].hsplit, -				 vsi->rx_rings[i].next_to_use, -				 vsi->rx_rings[i].next_to_clean, -				 vsi->rx_rings[i].ring_active); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", -				 i, vsi->rx_rings[i].rx_stats.packets, -				 vsi->rx_rings[i].rx_stats.bytes, -				 vsi->rx_rings[i].rx_stats.non_eop_descs); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n", -				 i, -				 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed, -				vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n", -				 i, vsi->rx_rings[i].size, -				 (long unsigned int)vsi->rx_rings[i].dma); -			dev_info(&pf->pdev->dev, -				 "    rx_rings[%i]: vsi = %p, q_vector = %p\n", -				 i, vsi->rx_rings[i].vsi, -				 vsi->rx_rings[i].q_vector); -		} +	rcu_read_lock(); +	for (i = 0; i < vsi->num_queue_pairs; i++) { +		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); +		if (!rx_ring) +			continue; + +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: desc = %p\n", +			 i, rx_ring->desc); +		dev_info(&pf->pdev->dev, +			 "    
rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", +			 i, rx_ring->dev, +			 rx_ring->netdev, +			 rx_ring->rx_bi); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", +			 i, rx_ring->state, +			 rx_ring->queue_index, +			 rx_ring->reg_idx); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", +			 i, rx_ring->rx_hdr_len, +			 rx_ring->rx_buf_len, +			 rx_ring->dtype); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", +			 i, rx_ring->hsplit, +			 rx_ring->next_to_use, +			 rx_ring->next_to_clean, +			 rx_ring->ring_active); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", +			 i, rx_ring->stats.packets, +			 rx_ring->stats.bytes, +			 rx_ring->rx_stats.non_eop_descs); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n", +			 i, +			 rx_ring->rx_stats.alloc_page_failed, +			 rx_ring->rx_stats.alloc_buff_failed); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n", +			 i, rx_ring->size, +			 (long unsigned int)rx_ring->dma); +		dev_info(&pf->pdev->dev, +			 "    rx_rings[%i]: vsi = %p, q_vector = %p\n", +			 i, rx_ring->vsi, +			 rx_ring->q_vector);  	} -	if (vsi->tx_rings) { -		for (i = 0; i < vsi->num_queue_pairs; i++) { -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: desc = %p\n", -				 i, vsi->tx_rings[i].desc); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", -				 i, vsi->tx_rings[i].dev, -				 vsi->tx_rings[i].netdev, -				 vsi->tx_rings[i].tx_bi); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", -				 i, vsi->tx_rings[i].state, -				 vsi->tx_rings[i].queue_index, -				 vsi->tx_rings[i].reg_idx); -			dev_info(&pf->pdev->dev, -				 
"    tx_rings[%i]: dtype = %d\n", -				 i, vsi->tx_rings[i].dtype); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", -				 i, vsi->tx_rings[i].hsplit, -				 vsi->tx_rings[i].next_to_use, -				 vsi->tx_rings[i].next_to_clean, -				 vsi->tx_rings[i].ring_active); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", -				 i, vsi->tx_rings[i].tx_stats.packets, -				 vsi->tx_rings[i].tx_stats.bytes, -				 vsi->tx_rings[i].tx_stats.restart_queue); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n", -				 i, -				 vsi->tx_rings[i].tx_stats.tx_busy, -				 vsi->tx_rings[i].tx_stats.completed, -				 vsi->tx_rings[i].tx_stats.tx_done_old); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n", -				 i, vsi->tx_rings[i].size, -				 (long unsigned int)vsi->tx_rings[i].dma); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: vsi = %p, q_vector = %p\n", -				 i, vsi->tx_rings[i].vsi, -				 vsi->tx_rings[i].q_vector); -			dev_info(&pf->pdev->dev, -				 "    tx_rings[%i]: DCB tc = %d\n", -				 i, vsi->tx_rings[i].dcb_tc); -		} +	for (i = 0; i < vsi->num_queue_pairs; i++) { +		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); +		if (!tx_ring) +			continue; + +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: desc = %p\n", +			 i, tx_ring->desc); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", +			 i, tx_ring->dev, +			 tx_ring->netdev, +			 tx_ring->tx_bi); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", +			 i, tx_ring->state, +			 tx_ring->queue_index, +			 tx_ring->reg_idx); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: dtype = %d\n", +			 i, tx_ring->dtype); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: hsplit = %d, 
next_to_use = %d, next_to_clean = %d, ring_active = %i\n", +			 i, tx_ring->hsplit, +			 tx_ring->next_to_use, +			 tx_ring->next_to_clean, +			 tx_ring->ring_active); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", +			 i, tx_ring->stats.packets, +			 tx_ring->stats.bytes, +			 tx_ring->tx_stats.restart_queue); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", +			 i, +			 tx_ring->tx_stats.tx_busy, +			 tx_ring->tx_stats.tx_done_old); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n", +			 i, tx_ring->size, +			 (long unsigned int)tx_ring->dma); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: vsi = %p, q_vector = %p\n", +			 i, tx_ring->vsi, +			 tx_ring->q_vector); +		dev_info(&pf->pdev->dev, +			 "    tx_rings[%i]: DCB tc = %d\n", +			 i, tx_ring->dcb_tc);  	} +	rcu_read_unlock();  	dev_info(&pf->pdev->dev,  		 "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",  		 vsi->work_limit, vsi->rx_itr_setting, @@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)  	dev_info(&pf->pdev->dev,  		 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",  		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); -	if (vsi->q_vectors) { -		for (i = 0; i < vsi->num_q_vectors; i++) { -			dev_info(&pf->pdev->dev, -				 "    q_vectors[%i]: base index = %ld\n", -				 i, ((long int)*vsi->q_vectors[i].rx.ring- -					(long int)*vsi->q_vectors[0].rx.ring)/ -					sizeof(struct i40e_ring)); -		} -	}  	dev_info(&pf->pdev->dev,  		 "    num_q_vectors = %i, base_vector = %i\n",  		 vsi->num_q_vectors, vsi->base_vector); @@ -716,8 +707,13 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)  {  	struct i40e_adminq_ring *ring;  	struct i40e_hw *hw = &pf->hw; +	char hdr[32];  	int i; +	snprintf(hdr, sizeof(hdr), "%s %s:         ", +		 
dev_driver_string(&pf->pdev->dev), +		 dev_name(&pf->pdev->dev)); +  	/* first the send (command) ring, then the receive (event) ring */  	dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");  	ring = &(hw->aq.asq); @@ -727,14 +723,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)  			 "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",  			 i, d->flags, d->opcode, d->datalen, d->retval,  			 d->cookie_high, d->cookie_low); -		dev_info(&pf->pdev->dev, -			 "            %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", -			 d->params.raw[0], d->params.raw[1], d->params.raw[2], -			 d->params.raw[3], d->params.raw[4], d->params.raw[5], -			 d->params.raw[6], d->params.raw[7], d->params.raw[8], -			 d->params.raw[9], d->params.raw[10], d->params.raw[11], -			 d->params.raw[12], d->params.raw[13], -			 d->params.raw[14], d->params.raw[15]); +		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, +			       16, 1, d->params.raw, 16, 0);  	}  	dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); @@ -745,14 +735,8 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)  			 "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",  			 i, d->flags, d->opcode, d->datalen, d->retval,  			 d->cookie_high, d->cookie_low); -		dev_info(&pf->pdev->dev, -			 "            %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", -			 d->params.raw[0], d->params.raw[1], d->params.raw[2], -			 d->params.raw[3], d->params.raw[4], d->params.raw[5], -			 d->params.raw[6], d->params.raw[7], d->params.raw[8], -			 d->params.raw[9], d->params.raw[10], d->params.raw[11], -			 d->params.raw[12], d->params.raw[13], -			 d->params.raw[14], d->params.raw[15]); +		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, +			       16, 1, d->params.raw, 16, 0);  	}  } @@ -768,53 +752,56 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)  static void 
i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,  			       struct i40e_pf *pf, bool is_rx_ring)  { -	union i40e_rx_desc *ds; +	struct i40e_tx_desc *txd; +	union i40e_rx_desc *rxd;  	struct i40e_ring ring;  	struct i40e_vsi *vsi;  	int i;  	vsi = i40e_dbg_find_vsi(pf, vsi_seid);  	if (!vsi) { -		dev_info(&pf->pdev->dev, -			 "vsi %d not found\n", vsi_seid); -		if (is_rx_ring) -			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); -		else -			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); +		dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);  		return;  	}  	if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {  		dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); -		if (is_rx_ring) -			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); -		else -			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); +		return; +	} +	if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { +		dev_info(&pf->pdev->dev, +			 "descriptor rings have not been allocated for vsi %d\n", +			 vsi_seid);  		return;  	}  	if (is_rx_ring) -		ring = vsi->rx_rings[ring_id]; +		ring = *vsi->rx_rings[ring_id];  	else -		ring = vsi->tx_rings[ring_id]; +		ring = *vsi->tx_rings[ring_id];  	if (cnt == 2) {  		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",  			 vsi_seid, is_rx_ring ? 
"rx" : "tx", ring_id);  		for (i = 0; i < ring.count; i++) { -			if (is_rx_ring) -				ds = I40E_RX_DESC(&ring, i); -			else -				ds = (union i40e_rx_desc *) -					I40E_TX_DESC(&ring, i); -			if ((sizeof(union i40e_rx_desc) == -			    sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring)) +			if (!is_rx_ring) { +				txd = I40E_TX_DESC(&ring, i);  				dev_info(&pf->pdev->dev, -					 "   d[%03i] = 0x%016llx 0x%016llx\n", i, -					 ds->read.pkt_addr, ds->read.hdr_addr); -			else +					 "   d[%03i] = 0x%016llx 0x%016llx\n", +					 i, txd->buffer_addr, +					 txd->cmd_type_offset_bsz); +			} else if (sizeof(union i40e_rx_desc) == +				   sizeof(union i40e_16byte_rx_desc)) { +				rxd = I40E_RX_DESC(&ring, i); +				dev_info(&pf->pdev->dev, +					 "   d[%03i] = 0x%016llx 0x%016llx\n", +					 i, rxd->read.pkt_addr, +					 rxd->read.hdr_addr); +			} else { +				rxd = I40E_RX_DESC(&ring, i);  				dev_info(&pf->pdev->dev,  					 "   d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", -					 i, ds->read.pkt_addr, -					 ds->read.hdr_addr, -					 ds->read.rsvd1, ds->read.rsvd2); +					 i, rxd->read.pkt_addr, +					 rxd->read.hdr_addr, +					 rxd->read.rsvd1, rxd->read.rsvd2); +			}  		}  	} else if (cnt == 3) {  		if (desc_n >= ring.count || desc_n < 0) { @@ -822,27 +809,29 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,  				 "descriptor %d not found\n", desc_n);  			return;  		} -		if (is_rx_ring) -			ds = I40E_RX_DESC(&ring, desc_n); -		else -			ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n); -		if ((sizeof(union i40e_rx_desc) == -		    sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring)) -			dev_info(&pf->pdev->dev, -				 "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", -				 vsi_seid, is_rx_ring ? 
"rx" : "tx", ring_id, -				 desc_n, ds->read.pkt_addr, ds->read.hdr_addr); -		else +		if (!is_rx_ring) { +			txd = I40E_TX_DESC(&ring, desc_n); +			dev_info(&pf->pdev->dev, +				 "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", +				 vsi_seid, ring_id, desc_n, +				 txd->buffer_addr, txd->cmd_type_offset_bsz); +		} else if (sizeof(union i40e_rx_desc) == +			   sizeof(union i40e_16byte_rx_desc)) { +			rxd = I40E_RX_DESC(&ring, desc_n); +			dev_info(&pf->pdev->dev, +				 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", +				 vsi_seid, ring_id, desc_n, +				 rxd->read.pkt_addr, rxd->read.hdr_addr); +		} else { +			rxd = I40E_RX_DESC(&ring, desc_n);  			dev_info(&pf->pdev->dev,  				 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", -				 vsi_seid, ring_id, -				 desc_n, ds->read.pkt_addr, ds->read.hdr_addr, -				 ds->read.rsvd1, ds->read.rsvd2); +				 vsi_seid, ring_id, desc_n, +				 rxd->read.pkt_addr, rxd->read.hdr_addr, +				 rxd->read.rsvd1, rxd->read.rsvd2); +		}  	} else { -		if (is_rx_ring) -			dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); -		else -			dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); +		dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");  	}  } @@ -854,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)  {  	int i; -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +	for (i = 0; i < pf->num_alloc_vsi; i++)  		if (pf->vsi[i])  			dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",  				 i, pf->vsi[i]->seid); @@ -873,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,  		 "    rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",  		estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);  	dev_info(&pf->pdev->dev, -		 "    rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n", -		 estats->rx_broadcast, estats->rx_discards, estats->rx_errors); +	
	 "    rx_broadcast = \t%lld \trx_discards = \t\t%lld\n", +		 estats->rx_broadcast, estats->rx_discards);  	dev_info(&pf->pdev->dev, -		 "    rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", -		 estats->rx_missed, estats->rx_unknown_protocol, -		 estats->tx_bytes); +		 "    rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", +		 estats->rx_unknown_protocol, estats->tx_bytes);  	dev_info(&pf->pdev->dev,  		 "    tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",  		 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); @@ -988,8 +976,7 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)  	veb = i40e_dbg_find_veb(pf, seid);  	if (!veb) { -		dev_info(&pf->pdev->dev, -			 "%d: can't find veb\n", seid); +		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);  		return;  	}  	dev_info(&pf->pdev->dev, @@ -1015,6 +1002,24 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)  	}  } +/** + * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR + * @pf: the pf that would be altered + * @flag: flag that needs enabling or disabling + * @enable: Enable/disable FD SD/ATR + **/ +static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) +{ +	if (enable) { +		pf->flags |= flag; +	} else { +		pf->flags &= ~flag; +		pf->auto_disable_flags |= flag; +	} +	dev_info(&pf->pdev->dev, "requesting a pf reset\n"); +	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); +} +  #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)  /**   * i40e_dbg_command_write - write into command datum @@ -1028,11 +1033,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				      size_t count, loff_t *ppos)  {  	struct i40e_pf *pf = filp->private_data; +	char *cmd_buf, *cmd_buf_tmp;  	int bytes_not_copied;  	struct i40e_vsi *vsi; -	u8 *print_buf_start; -	u8 *print_buf; -	char *cmd_buf;  	int vsi_seid;  	int veb_seid;  	int cnt; @@ -1051,10 +1054,11 @@ static ssize_t i40e_dbg_command_write(struct file 
*filp,  		count -= bytes_not_copied;  	cmd_buf[count] = '\0'; -	print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL); -	if (!print_buf_start) -		goto command_write_done; -	print_buf = print_buf_start; +	cmd_buf_tmp = strchr(cmd_buf, '\n'); +	if (cmd_buf_tmp) { +		*cmd_buf_tmp = '\0'; +		count = cmd_buf_tmp - cmd_buf + 1; +	}  	if (strncmp(cmd_buf, "add vsi", 7) == 0) {  		vsi_seid = -1; @@ -1107,7 +1111,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		vsi = i40e_dbg_find_vsi(pf, vsi_seid);  		if (!vsi) {  			dev_info(&pf->pdev->dev, -				 "add relay: vsi VSI %d not found\n", vsi_seid); +				 "add relay: VSI %d not found\n", vsi_seid);  			goto command_write_done;  		} @@ -1157,9 +1161,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		i40e_veb_release(pf->veb[i]);  	} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { -		u8 ma[6]; -		int vlan = 0;  		struct i40e_mac_filter *f; +		int vlan = 0; +		u8 ma[6];  		int ret;  		cnt = sscanf(&cmd_buf[11], @@ -1195,8 +1199,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				 ma, vlan, vsi_seid, f, ret);  	} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { -		u8 ma[6];  		int vlan = 0; +		u8 ma[6];  		int ret;  		cnt = sscanf(&cmd_buf[11], @@ -1232,9 +1236,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				 ma, vlan, vsi_seid, ret);  	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) { -		int v; -		u16 vid;  		i40e_status ret; +		u16 vid; +		int v;  		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);  		if (cnt != 2) { @@ -1464,21 +1468,25 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				 pf->msg_enable);  		}  	} else if (strncmp(cmd_buf, "pfr", 3) == 0) { -		dev_info(&pf->pdev->dev, "forcing PFR\n"); -		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); +		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); +		i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));  	} else if (strncmp(cmd_buf, "corer", 5) == 0) { -		
dev_info(&pf->pdev->dev, "forcing CoreR\n"); -		i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED)); +		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); +		i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));  	} else if (strncmp(cmd_buf, "globr", 5) == 0) { -		dev_info(&pf->pdev->dev, "forcing GlobR\n"); -		i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); +		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); +		i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); + +	} else if (strncmp(cmd_buf, "empr", 4) == 0) { +		dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); +		i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));  	} else if (strncmp(cmd_buf, "read", 4) == 0) {  		u32 address;  		u32 value; -		cnt = sscanf(&cmd_buf[4], "%x", &address); +		cnt = sscanf(&cmd_buf[4], "%i", &address);  		if (cnt != 1) {  			dev_info(&pf->pdev->dev, "read <reg>\n");  			goto command_write_done; @@ -1497,7 +1505,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  	} else if (strncmp(cmd_buf, "write", 5) == 0) {  		u32 address, value; -		cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value); +		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);  		if (cnt != 2) {  			dev_info(&pf->pdev->dev, "write <reg> <value>\n");  			goto command_write_done; @@ -1515,10 +1523,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  			 address, value);  	} else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {  		if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { -			cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid); +			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);  			if (cnt == 0) {  				int i; -				for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +				for (i = 0; i < pf->num_alloc_vsi; i++)  					i40e_vsi_reset_stats(pf->vsi[i]);  				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");  			} else if (cnt == 1) { @@ -1542,32 +1550,152 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		} else {  			
dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");  		} +	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { +		struct i40e_aq_desc *desc; +		i40e_status ret; + +		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); +		if (!desc) +			goto command_write_done; +		cnt = sscanf(&cmd_buf[11], +			     "%hx %hx %hx %hx %x %x %x %x %x %x", +			     &desc->flags, +			     &desc->opcode, &desc->datalen, &desc->retval, +			     &desc->cookie_high, &desc->cookie_low, +			     &desc->params.internal.param0, +			     &desc->params.internal.param1, +			     &desc->params.internal.param2, +			     &desc->params.internal.param3); +		if (cnt != 10) { +			dev_info(&pf->pdev->dev, +				 "send aq_cmd: bad command string, cnt=%d\n", +				 cnt); +			kfree(desc); +			desc = NULL; +			goto command_write_done; +		} +		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); +		if (!ret) { +			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); +		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { +			dev_info(&pf->pdev->dev, +				 "AQ command send failed Opcode %x AQ Error: %d\n", +				 desc->opcode, pf->hw.aq.asq_last_status); +		} else { +			dev_info(&pf->pdev->dev, +				 "AQ command send failed Opcode %x Status: %d\n", +				 desc->opcode, ret); +		} +		dev_info(&pf->pdev->dev, +			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", +			 desc->flags, desc->opcode, desc->datalen, desc->retval, +			 desc->cookie_high, desc->cookie_low, +			 desc->params.internal.param0, +			 desc->params.internal.param1, +			 desc->params.internal.param2, +			 desc->params.internal.param3); +		kfree(desc); +		desc = NULL; +	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { +		struct i40e_aq_desc *desc; +		i40e_status ret; +		u16 buffer_len; +		u8 *buff; + +		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); +		if (!desc) +			goto command_write_done; +		cnt = sscanf(&cmd_buf[20], +			     "%hx %hx %hx %hx %x %x 
%x %x %x %x %hd", +			     &desc->flags, +			     &desc->opcode, &desc->datalen, &desc->retval, +			     &desc->cookie_high, &desc->cookie_low, +			     &desc->params.internal.param0, +			     &desc->params.internal.param1, +			     &desc->params.internal.param2, +			     &desc->params.internal.param3, +			     &buffer_len); +		if (cnt != 11) { +			dev_info(&pf->pdev->dev, +				 "send indirect aq_cmd: bad command string, cnt=%d\n", +				 cnt); +			kfree(desc); +			desc = NULL; +			goto command_write_done; +		} +		/* Just stub a buffer big enough in case user messed up */ +		if (buffer_len == 0) +			buffer_len = 1280; + +		buff = kzalloc(buffer_len, GFP_KERNEL); +		if (!buff) { +			kfree(desc); +			desc = NULL; +			goto command_write_done; +		} +		desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); +		ret = i40e_asq_send_command(&pf->hw, desc, buff, +					    buffer_len, NULL); +		if (!ret) { +			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); +		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { +			dev_info(&pf->pdev->dev, +				 "AQ command send failed Opcode %x AQ Error: %d\n", +				 desc->opcode, pf->hw.aq.asq_last_status); +		} else { +			dev_info(&pf->pdev->dev, +				 "AQ command send failed Opcode %x Status: %d\n", +				 desc->opcode, ret); +		} +		dev_info(&pf->pdev->dev, +			 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", +			 desc->flags, desc->opcode, desc->datalen, desc->retval, +			 desc->cookie_high, desc->cookie_low, +			 desc->params.internal.param0, +			 desc->params.internal.param1, +			 desc->params.internal.param2, +			 desc->params.internal.param3); +		print_hex_dump(KERN_INFO, "AQ buffer WB: ", +			       DUMP_PREFIX_OFFSET, 16, 1, +			       buff, buffer_len, true); +		kfree(buff); +		buff = NULL; +		kfree(desc); +		desc = NULL;  	} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||  		   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { -		struct i40e_fdir_data fd_data; -		int ret; +		
struct i40e_fdir_filter fd_data;  		u16 packet_len, i, j = 0;  		char *asc_packet; +		u8 *raw_packet;  		bool add = false; +		int ret; + +		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) +			goto command_write_done; + +		if (strncmp(cmd_buf, "add", 3) == 0) +			add = true; + +		if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) +			goto command_write_done; -		asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, +		asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,  				     GFP_KERNEL);  		if (!asc_packet)  			goto command_write_done; -		fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, -					     GFP_KERNEL); +		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, +				     GFP_KERNEL); -		if (!fd_data.raw_packet) { +		if (!raw_packet) {  			kfree(asc_packet);  			asc_packet = NULL;  			goto command_write_done;  		} -		if (strncmp(cmd_buf, "add", 3) == 0) -			add = true;  		cnt = sscanf(&cmd_buf[13], -			     "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s", +			     "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",  			     &fd_data.q_index,  			     &fd_data.flex_off, &fd_data.pctype,  			     &fd_data.dest_vsi, &fd_data.dest_ctl, @@ -1579,42 +1707,42 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				 cnt);  			kfree(asc_packet);  			asc_packet = NULL; -			kfree(fd_data.raw_packet); +			kfree(raw_packet);  			goto command_write_done;  		}  		/* fix packet length if user entered 0 */  		if (packet_len == 0) -			packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP; +			packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;  		/* make sure to check the max as well */  		packet_len = min_t(u16, -				   packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); +				   packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE); -		dev_info(&pf->pdev->dev, "FD raw packet:\n");  		for (i = 0; i < packet_len; i++) {  			sscanf(&asc_packet[j], "%2hhx ", -			       &fd_data.raw_packet[i]); +			       &raw_packet[i]);  			j += 3; -			snprintf(print_buf, 3, "%02x ", 
fd_data.raw_packet[i]); -			print_buf += 3; -			if ((i % 16) == 15) { -				snprintf(print_buf, 1, "\n"); -				print_buf++; -			}  		} -		dev_info(&pf->pdev->dev, "%s\n", print_buf_start); -		ret = i40e_program_fdir_filter(&fd_data, pf, add); +		dev_info(&pf->pdev->dev, "FD raw packet dump\n"); +		print_hex_dump(KERN_INFO, "FD raw packet: ", +			       DUMP_PREFIX_OFFSET, 16, 1, +			       raw_packet, packet_len, true); +		ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);  		if (!ret) {  			dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");  		} else {  			dev_info(&pf->pdev->dev,  				 "Filter command send failed %d\n", ret);  		} -		kfree(fd_data.raw_packet); -		fd_data.raw_packet = NULL; +		kfree(raw_packet); +		raw_packet = NULL;  		kfree(asc_packet);  		asc_packet = NULL; +	} else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { +		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); +	} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { +		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);  	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {  		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {  			int ret; @@ -1625,8 +1753,35 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  					 pf->hw.aq.asq_last_status);  				goto command_write_done;  			} +			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, +						pf->hw.mac.addr, +						I40E_ETH_P_LLDP, 0, +						pf->vsi[pf->lan_vsi]->seid, +						0, true, NULL, NULL); +			if (ret) { +				dev_info(&pf->pdev->dev, +					"%s: Add Control Packet Filter AQ command failed =0x%x\n", +					__func__, pf->hw.aq.asq_last_status); +				goto command_write_done; +			} +#ifdef CONFIG_I40E_DCB +			pf->dcbx_cap = DCB_CAP_DCBX_HOST | +				       DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */  		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {  			int ret; +			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, +						pf->hw.mac.addr, +						I40E_ETH_P_LLDP, 0, +						
pf->vsi[pf->lan_vsi]->seid, +						0, false, NULL, NULL); +			if (ret) { +				dev_info(&pf->pdev->dev, +					"%s: Remove Control Packet Filter AQ command failed =0x%x\n", +					__func__, pf->hw.aq.asq_last_status); +				/* Continue and start FW LLDP anyways */ +			} +  			ret = i40e_aq_start_lldp(&pf->hw, NULL);  			if (ret) {  				dev_info(&pf->pdev->dev, @@ -1634,11 +1789,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  					 pf->hw.aq.asq_last_status);  				goto command_write_done;  			} +#ifdef CONFIG_I40E_DCB +			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | +				       DCB_CAP_DCBX_VER_IEEE; +#endif /* CONFIG_I40E_DCB */  		} else if (strncmp(&cmd_buf[5],  			   "get local", 9) == 0) { -			int ret, i; -			u8 *buff;  			u16 llen, rlen; +			int ret; +			u8 *buff;  			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);  			if (!buff)  				goto command_write_done; @@ -1655,23 +1814,16 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				buff = NULL;  				goto command_write_done;  			} -			dev_info(&pf->pdev->dev, -				 "Get LLDP MIB (local) AQ buffer written back:\n"); -			for (i = 0; i < I40E_LLDPDU_SIZE; i++) { -				snprintf(print_buf, 3, "%02x ", buff[i]); -				print_buf += 3; -				if ((i % 16) == 15) { -					snprintf(print_buf, 1, "\n"); -					print_buf++; -				} -			} -			dev_info(&pf->pdev->dev, "%s\n", print_buf_start); +			dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); +			print_hex_dump(KERN_INFO, "LLDP MIB (local): ", +				       DUMP_PREFIX_OFFSET, 16, 1, +				       buff, I40E_LLDPDU_SIZE, true);  			kfree(buff);  			buff = NULL;  		} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { -			int ret, i; -			u8 *buff;  			u16 llen, rlen; +			int ret; +			u8 *buff;  			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);  			if (!buff)  				goto command_write_done; @@ -1689,17 +1841,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  				buff = NULL;  				goto command_write_done;  			} -			dev_info(&pf->pdev->dev, -				 "Get 
LLDP MIB (remote) AQ buffer written back:\n"); -			for (i = 0; i < I40E_LLDPDU_SIZE; i++) { -				snprintf(print_buf, 3, "%02x ", buff[i]); -				print_buf += 3; -				if ((i % 16) == 15) { -					snprintf(print_buf, 1, "\n"); -					print_buf++; -				} -			} -			dev_info(&pf->pdev->dev, "%s\n", print_buf_start); +			dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); +			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ", +				       DUMP_PREFIX_OFFSET, 16, 1, +				       buff, I40E_LLDPDU_SIZE, true);  			kfree(buff);  			buff = NULL;  		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { @@ -1724,7 +1869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  			}  		}  	} else if (strncmp(cmd_buf, "nvm read", 8) == 0) { -		u16 buffer_len, i, bytes; +		u16 buffer_len, bytes;  		u16 module;  		u32 offset;  		u16 *buff; @@ -1747,11 +1892,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  			goto command_write_done;  		} -		/* Read at least 512 words */ -		if (buffer_len == 0) -			buffer_len = 512; +		/* set the max length */ +		buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);  		bytes = 2 * buffer_len; + +		/* read at least 1k bytes, no more than 4kB */ +		bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);  		buff = kzalloc(bytes, GFP_KERNEL);  		if (!buff)  			goto command_write_done; @@ -1776,16 +1923,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  			dev_info(&pf->pdev->dev,  				 "Read NVM module=0x%x offset=0x%x words=%d\n",  				 module, offset, buffer_len); -			for (i = 0; i < buffer_len; i++) { -				if ((i % 16) == 0) { -					snprintf(print_buf, 11, "\n0x%08x: ", -						 offset + i); -					print_buf += 11; -				} -				snprintf(print_buf, 5, "%04x ", buff[i]); -				print_buf += 5; -			} -			dev_info(&pf->pdev->dev, "%s\n", print_buf_start); +			if (bytes) +				print_hex_dump(KERN_INFO, "NVM Dump: ", +					DUMP_PREFIX_OFFSET, 16, 2, +					buff, bytes, true);  		}  		kfree(buff);  		buff = NULL; @@ -1815,8 
+1956,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  		dev_info(&pf->pdev->dev, "  pfr\n");  		dev_info(&pf->pdev->dev, "  corer\n");  		dev_info(&pf->pdev->dev, "  globr\n"); +		dev_info(&pf->pdev->dev, "  send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n"); +		dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");  		dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");  		dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); +		dev_info(&pf->pdev->dev, "  fd-atr off\n"); +		dev_info(&pf->pdev->dev, "  fd-atr on\n");  		dev_info(&pf->pdev->dev, "  lldp start\n");  		dev_info(&pf->pdev->dev, "  lldp stop\n");  		dev_info(&pf->pdev->dev, "  lldp get local\n"); @@ -1829,9 +1974,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,  command_write_done:  	kfree(cmd_buf);  	cmd_buf = NULL; -	kfree(print_buf_start); -	print_buf = NULL; -	print_buf_start = NULL;  	return count;  } @@ -1903,6 +2045,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,  	struct i40e_pf *pf = filp->private_data;  	int bytes_not_copied;  	struct i40e_vsi *vsi; +	char *buf_tmp;  	int vsi_seid;  	int i, cnt; @@ -1921,6 +2064,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,  		count -= bytes_not_copied;  	i40e_dbg_netdev_ops_buf[count] = '\0'; +	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); +	if (buf_tmp) { +		*buf_tmp = '\0'; +		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; +	} +  	if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {  		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);  		if (cnt != 1) { @@ -1931,9 +2080,13 @@ static ssize_t 
i40e_dbg_netdev_ops_write(struct file *filp,  		if (!vsi) {  			dev_info(&pf->pdev->dev,  				 "tx_timeout: VSI %d not found\n", vsi_seid); -			goto netdev_ops_write_done; -		} -		if (rtnl_trylock()) { +		} else if (!vsi->netdev) { +			dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", +				 vsi_seid); +		} else if (test_bit(__I40E_DOWN, &vsi->state)) { +			dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", +				 vsi_seid); +		} else if (rtnl_trylock()) {  			vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);  			rtnl_unlock();  			dev_info(&pf->pdev->dev, "tx_timeout called\n"); @@ -1952,9 +2105,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,  		if (!vsi) {  			dev_info(&pf->pdev->dev,  				 "change_mtu: VSI %d not found\n", vsi_seid); -			goto netdev_ops_write_done; -		} -		if (rtnl_trylock()) { +		} else if (!vsi->netdev) { +			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", +				 vsi_seid); +		} else if (rtnl_trylock()) {  			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,  								mtu);  			rtnl_unlock(); @@ -1973,9 +2127,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,  		if (!vsi) {  			dev_info(&pf->pdev->dev,  				 "set_rx_mode: VSI %d not found\n", vsi_seid); -			goto netdev_ops_write_done; -		} -		if (rtnl_trylock()) { +		} else if (!vsi->netdev) { +			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", +				 vsi_seid); +		} else if (rtnl_trylock()) {  			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);  			rtnl_unlock();  			dev_info(&pf->pdev->dev, "set_rx_mode called\n"); @@ -1993,11 +2148,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,  		if (!vsi) {  			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",  				 vsi_seid); -			goto netdev_ops_write_done; +		} else if (!vsi->netdev) { +			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", +				 vsi_seid); +		} else { +			for (i = 0; i < vsi->num_q_vectors; i++) +				
napi_schedule(&vsi->q_vectors[i]->napi); +			dev_info(&pf->pdev->dev, "napi called\n");  		} -		for (i = 0; i < vsi->num_q_vectors; i++) -			napi_schedule(&vsi->q_vectors[i].napi); -		dev_info(&pf->pdev->dev, "napi called\n");  	} else {  		dev_info(&pf->pdev->dev, "unknown command '%s'\n",  			 i40e_dbg_netdev_ops_buf); @@ -2024,21 +2182,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {   **/  void i40e_dbg_pf_init(struct i40e_pf *pf)  { -	struct dentry *pfile __attribute__((unused)); +	struct dentry *pfile;  	const char *name = pci_name(pf->pdev); +	const struct device *dev = &pf->pdev->dev;  	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); -	if (pf->i40e_dbg_pf) { -		pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, -					    pf, &i40e_dbg_command_fops); -		pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, -					    &i40e_dbg_dump_fops); -		pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, -					    pf, &i40e_dbg_netdev_ops_fops); -	} else { -		dev_info(&pf->pdev->dev, -			 "debugfs entry for %s failed\n", name); -	} +	if (!pf->i40e_dbg_pf) +		return; + +	pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, +				    &i40e_dbg_command_fops); +	if (!pfile) +		goto create_failed; + +	pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, +				    &i40e_dbg_dump_fops); +	if (!pfile) +		goto create_failed; + +	pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, +				    &i40e_dbg_netdev_ops_fops); +	if (!pfile) +		goto create_failed; + +	return; + +create_failed: +	dev_info(dev, "debugfs dir/file for %s failed\n", name); +	debugfs_remove_recursive(pf->i40e_dbg_pf); +	return;  }  /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index de255143bde..56438bd579e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -1,7 +1,7 @@  
/*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -68,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,  struct i40e_diag_reg_test_info i40e_reg_list[] = {  	/* offset               mask         elements   stride */ -	{I40E_QTX_CTL(0),       0x0000FFBF,  64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, -	{I40E_PFINT_ITR0(0),    0x00000FFF,   3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, -	{I40E_PFINT_ITRN(0, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, -	{I40E_PFINT_ITRN(1, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, -	{I40E_PFINT_ITRN(2, 0), 0x00000FFF,  64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, -	{I40E_PFINT_STAT_CTL0,  0x0000000C,   1, 0}, -	{I40E_PFINT_LNKLST0,    0x00001FFF,   1, 0}, -	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, -	{I40E_QINT_TQCTL(0),    0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, -	{I40E_QINT_RQCTL(0),    0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - 
I40E_QINT_RQCTL(0)}, -	{I40E_PFINT_ICR0_ENA,   0xF7F20000,   1, 0}, +	{I40E_QTX_CTL(0),       0x0000FFBF, 1, +		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, +	{I40E_PFINT_ITR0(0),    0x00000FFF, 3, +		I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)}, +	{I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1, +		I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)}, +	{I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1, +		I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)}, +	{I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1, +		I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)}, +	{I40E_PFINT_STAT_CTL0,  0x0000000C, 1, 0}, +	{I40E_PFINT_LNKLST0,    0x00001FFF, 1, 0}, +	{I40E_PFINT_LNKLSTN(0), 0x000007FF, 1, +		I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)}, +	{I40E_QINT_TQCTL(0),    0x000000FF, 1, +		I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)}, +	{I40E_QINT_RQCTL(0),    0x000000FF, 1, +		I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)}, +	{I40E_PFINT_ICR0_ENA,   0xF7F20000, 1, 0},  	{ 0 }  }; @@ -94,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)  	u32 reg, mask;  	u32 i, j; -	for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) { +	for (i = 0; i40e_reg_list[i].offset != 0 && +					     !ret_code; i++) { + +		/* set actual reg range for dynamically allocated resources */ +		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && +		    hw->func_caps.num_tx_qp != 0) +			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; +		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || +		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || +		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) || +		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || +		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && +		    hw->func_caps.num_msix_vectors != 0) +			i40e_reg_list[i].elements = +				hw->func_caps.num_msix_vectors - 1; + +		/* test register access */  		mask = i40e_reg_list[i].mask; -		for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) { +		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {  			reg = 
i40e_reg_list[i].offset +  			      (j * i40e_reg_list[i].stride);  			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); @@ -119,7 +142,7 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)  	/* read NVM control word and if NVM valid, validate EEPROM checksum*/  	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, ®_val); -	if ((!ret_code) && +	if (!ret_code &&  	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==  	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {  		ret_code = i40e_validate_nvm_checksum(hw, NULL); diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index 3d98277f452..0b591165208 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -31,10 +30,10 @@  #include "i40e_type.h"  enum i40e_lb_mode { -	I40E_LB_MODE_NONE = 0, -	I40E_LB_MODE_PHY_LOCAL, -	I40E_LB_MODE_PHY_REMOTE, -	I40E_LB_MODE_MAC_LOCAL, +	I40E_LB_MODE_NONE       = 0x0, +	I40E_LB_MODE_PHY_LOCAL  = I40E_AQ_LB_PHY_LOCAL, +	I40E_LB_MODE_PHY_REMOTE = I40E_AQ_LB_PHY_REMOTE, +	I40E_LB_MODE_MAC_LOCAL  = I40E_AQ_LB_MAC_LOCAL,  };  struct i40e_diag_reg_test_info { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9a76b8cec76..4a488ffcd6b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -47,6 +46,8 @@ struct i40e_stats {  		I40E_STAT(struct i40e_pf, _name, _stat)  #define I40E_VSI_STAT(_name, _stat) \  		I40E_STAT(struct i40e_vsi, _name, _stat) +#define I40E_VEB_STAT(_name, _stat) \ +		I40E_STAT(struct i40e_veb, _name, _stat)  static const struct i40e_stats i40e_gstrings_net_stats[] = {  	I40E_NETDEV_STAT(rx_packets), @@ -57,12 +58,39 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {  	I40E_NETDEV_STAT(tx_errors),  	I40E_NETDEV_STAT(rx_dropped),  	I40E_NETDEV_STAT(tx_dropped), -	I40E_NETDEV_STAT(multicast),  	I40E_NETDEV_STAT(collisions),  	I40E_NETDEV_STAT(rx_length_errors),  	I40E_NETDEV_STAT(rx_crc_errors),  }; +static const struct i40e_stats i40e_gstrings_veb_stats[] = { +	I40E_VEB_STAT("rx_bytes", stats.rx_bytes), +	I40E_VEB_STAT("tx_bytes", stats.tx_bytes), +	I40E_VEB_STAT("rx_unicast", stats.rx_unicast), +	I40E_VEB_STAT("tx_unicast", stats.tx_unicast), +	I40E_VEB_STAT("rx_multicast", stats.rx_multicast), +	I40E_VEB_STAT("tx_multicast", stats.tx_multicast), +	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast), +	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast), +	I40E_VEB_STAT("rx_discards", stats.rx_discards), +	I40E_VEB_STAT("tx_discards", stats.tx_discards), +	I40E_VEB_STAT("tx_errors", stats.tx_errors), +	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol), +}; + +static const struct i40e_stats i40e_gstrings_misc_stats[] = { +	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast), +	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast), +	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast), +	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast), +	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast), +	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast), +	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), +}; + +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, +				 struct ethtool_rxnfc *cmd); +  /* These PF_STATs might look like duplicates of some NETDEV_STATs,   * but they are 
separate.  This device supports Virtualization, and   * as such might have several netdevs supporting VMDq and FCoE going @@ -76,7 +104,12 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {  static struct i40e_stats i40e_gstrings_stats[] = {  	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),  	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), -	I40E_PF_STAT("rx_errors", stats.eth.rx_errors), +	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast), +	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast), +	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast), +	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast), +	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), +	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),  	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),  	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),  	I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), @@ -85,6 +118,8 @@ static struct i40e_stats i40e_gstrings_stats[] = {  	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),  	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),  	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), +	I40E_PF_STAT("tx_timeout", tx_timeout_count), +	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),  	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),  	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),  	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), @@ -109,14 +144,26 @@ static struct i40e_stats i40e_gstrings_stats[] = {  	I40E_PF_STAT("rx_oversize", stats.rx_oversize),  	I40E_PF_STAT("rx_jabber", stats.rx_jabber),  	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), +	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), +	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), +	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), + +	/* LPI stats */ +	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), +	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), +	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), +	I40E_PF_STAT("rx_lpi_count", 
stats.rx_lpi_count),  };  #define I40E_QUEUE_STATS_LEN(n) \ -  ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \ -    ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2) +	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ +	    * 2 /* Tx and Rx together */                                     \ +	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))  #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)  #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats) +#define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)  #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \ +				 I40E_MISC_STATS_LEN + \  				 I40E_QUEUE_STATS_LEN((n)))  #define I40E_PFC_STATS_LEN ( \  		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \ @@ -125,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {  		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \  		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \  		 / sizeof(u64)) +#define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)  #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \  				 I40E_PFC_STATS_LEN + \  				 I40E_VSI_STATS_LEN((n))) @@ -193,28 +241,48 @@ static int i40e_get_settings(struct net_device *netdev,  		ecmd->supported = SUPPORTED_10000baseKR_Full;  		ecmd->advertising = ADVERTISED_10000baseKR_Full;  		break; -	case I40E_PHY_TYPE_10GBASE_T:  	default: -		ecmd->supported = SUPPORTED_10000baseT_Full; -		ecmd->advertising = ADVERTISED_10000baseT_Full; +		if (i40e_is_40G_device(hw->device_id)) { +			ecmd->supported = SUPPORTED_40000baseSR4_Full; +			ecmd->advertising = ADVERTISED_40000baseSR4_Full; +		} else { +			ecmd->supported = SUPPORTED_10000baseT_Full; +			ecmd->advertising = ADVERTISED_10000baseT_Full; +		}  		break;  	} -	/* for now just say autoneg all the time */  	ecmd->supported |= SUPPORTED_Autoneg; +	ecmd->advertising |= ADVERTISED_Autoneg; +	ecmd->autoneg = 
((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? +			  AUTONEG_ENABLE : AUTONEG_DISABLE); -	if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) { +	switch (hw->phy.media_type) { +	case I40E_MEDIA_TYPE_BACKPLANE:  		ecmd->supported |= SUPPORTED_Backplane;  		ecmd->advertising |= ADVERTISED_Backplane;  		ecmd->port = PORT_NONE; -	} else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) { +		break; +	case I40E_MEDIA_TYPE_BASET:  		ecmd->supported |= SUPPORTED_TP;  		ecmd->advertising |= ADVERTISED_TP;  		ecmd->port = PORT_TP; -	} else { +		break; +	case I40E_MEDIA_TYPE_DA: +	case I40E_MEDIA_TYPE_CX4: +		ecmd->supported |= SUPPORTED_FIBRE; +		ecmd->advertising |= ADVERTISED_FIBRE; +		ecmd->port = PORT_DA; +		break; +	case I40E_MEDIA_TYPE_FIBER:  		ecmd->supported |= SUPPORTED_FIBRE;  		ecmd->advertising |= ADVERTISED_FIBRE;  		ecmd->port = PORT_FIBRE; +		break; +	case I40E_MEDIA_TYPE_UNKNOWN: +	default: +		ecmd->port = PORT_OTHER; +		break;  	}  	ecmd->transceiver = XCVR_EXTERNAL; @@ -256,12 +324,14 @@ static void i40e_get_pauseparam(struct net_device *netdev,  		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?  		  
AUTONEG_ENABLE : AUTONEG_DISABLE); -	pause->rx_pause = 0; -	pause->tx_pause = 0; -	if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX) +	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { +		pause->rx_pause = 1; +	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { +		pause->tx_pause = 1; +	} else if (hw->fc.current_mode == I40E_FC_FULL) {  		pause->rx_pause = 1; -	if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)  		pause->tx_pause = 1; +	}  }  static u32 i40e_get_msglevel(struct net_device *netdev) @@ -329,38 +399,56 @@ static int i40e_get_eeprom(struct net_device *netdev,  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_hw *hw = &np->vsi->back->hw; -	int first_word, last_word; -	u16 i, eeprom_len; -	u16 *eeprom_buff; -	int ret_val = 0; - +	struct i40e_pf *pf = np->vsi->back; +	int ret_val = 0, len; +	u8 *eeprom_buff; +	u16 i, sectors; +	bool last; +#define I40E_NVM_SECTOR_SIZE  4096  	if (eeprom->len == 0)  		return -EINVAL;  	eeprom->magic = hw->vendor_id | (hw->device_id << 16); -	first_word = eeprom->offset >> 1; -	last_word = (eeprom->offset + eeprom->len - 1) >> 1; -	eeprom_len = last_word - first_word + 1; - -	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL); +	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);  	if (!eeprom_buff)  		return -ENOMEM; -	ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len, -					   eeprom_buff); -	if (eeprom_len == 0) { -		kfree(eeprom_buff); -		return -EACCES; +	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); +	if (ret_val) { +		dev_info(&pf->pdev->dev, +			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n", +			 ret_val, hw->aq.asq_last_status); +		goto free_buff;  	} -	/* Device's eeprom is always little-endian, word addressable */ -	for (i = 0; i < eeprom_len; i++) -		le16_to_cpus(&eeprom_buff[i]); +	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; +	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 
1 : 0; +	len = I40E_NVM_SECTOR_SIZE; +	last = false; +	for (i = 0; i < sectors; i++) { +		if (i == (sectors - 1)) { +			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); +			last = true; +		} +		ret_val = i40e_aq_read_nvm(hw, 0x0, +				eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), +				len, +				eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), +				last, NULL); +		if (ret_val) { +			dev_info(&pf->pdev->dev, +				 "read NVM failed err=%d status=0x%x\n", +				 ret_val, hw->aq.asq_last_status); +			goto release_nvm; +		} +	} -	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); +release_nvm: +	i40e_release_nvm(hw); +	memcpy(bytes, eeprom_buff, eeprom->len); +free_buff:  	kfree(eeprom_buff); -  	return ret_val;  } @@ -368,8 +456,14 @@ static int i40e_get_eeprom_len(struct net_device *netdev)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_hw *hw = &np->vsi->back->hw; - -	return hw->nvm.sr_size * 2; +	u32 val; + +	val = (rd32(hw, I40E_GLPCI_LBARCTRL) +		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) +		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; +	/* register returns value in power of 2, 64Kbyte chunks. 
*/ +	val = (64 * 1024) * (1 << val); +	return val;  }  static void i40e_get_drvinfo(struct net_device *netdev, @@ -399,8 +493,8 @@ static void i40e_get_ringparam(struct net_device *netdev,  	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;  	ring->rx_mini_max_pending = 0;  	ring->rx_jumbo_max_pending = 0; -	ring->rx_pending = vsi->rx_rings[0].count; -	ring->tx_pending = vsi->tx_rings[0].count; +	ring->rx_pending = vsi->rx_rings[0]->count; +	ring->tx_pending = vsi->tx_rings[0]->count;  	ring->rx_mini_pending = 0;  	ring->rx_jumbo_pending = 0;  } @@ -418,19 +512,23 @@ static int i40e_set_ringparam(struct net_device *netdev,  	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))  		return -EINVAL; -	new_tx_count = clamp_t(u32, ring->tx_pending, -			       I40E_MIN_NUM_DESCRIPTORS, -			       I40E_MAX_NUM_DESCRIPTORS); -	new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); +	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS || +	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || +	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS || +	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { +		netdev_info(netdev, +			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", +			    ring->tx_pending, ring->rx_pending, +			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS); +		return -EINVAL; +	} -	new_rx_count = clamp_t(u32, ring->rx_pending, -			       I40E_MIN_NUM_DESCRIPTORS, -			       I40E_MAX_NUM_DESCRIPTORS); -	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE); +	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); +	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);  	/* if nothing to do return success */ -	if ((new_tx_count == vsi->tx_rings[0].count) && -	    (new_rx_count == vsi->rx_rings[0].count)) +	if ((new_tx_count == vsi->tx_rings[0]->count) && +	    (new_rx_count == vsi->rx_rings[0]->count))  		return 0;  	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) @@ -439,8 +537,8 
@@ static int i40e_set_ringparam(struct net_device *netdev,  	if (!netif_running(vsi->netdev)) {  		/* simple case - set for the next time the netdev is started */  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			vsi->tx_rings[i].count = new_tx_count; -			vsi->rx_rings[i].count = new_rx_count; +			vsi->tx_rings[i]->count = new_tx_count; +			vsi->rx_rings[i]->count = new_rx_count;  		}  		goto done;  	} @@ -451,10 +549,10 @@ static int i40e_set_ringparam(struct net_device *netdev,  	 */  	/* alloc updated Tx resources */ -	if (new_tx_count != vsi->tx_rings[0].count) { +	if (new_tx_count != vsi->tx_rings[0]->count) {  		netdev_info(netdev,  			    "Changing Tx descriptor count from %d to %d.\n", -			    vsi->tx_rings[0].count, new_tx_count); +			    vsi->tx_rings[0]->count, new_tx_count);  		tx_rings = kcalloc(vsi->alloc_queue_pairs,  				   sizeof(struct i40e_ring), GFP_KERNEL);  		if (!tx_rings) { @@ -464,7 +562,7 @@ static int i40e_set_ringparam(struct net_device *netdev,  		for (i = 0; i < vsi->num_queue_pairs; i++) {  			/* clone ring and setup updated count */ -			tx_rings[i] = vsi->tx_rings[i]; +			tx_rings[i] = *vsi->tx_rings[i];  			tx_rings[i].count = new_tx_count;  			err = i40e_setup_tx_descriptors(&tx_rings[i]);  			if (err) { @@ -481,10 +579,10 @@ static int i40e_set_ringparam(struct net_device *netdev,  	}  	/* alloc updated Rx resources */ -	if (new_rx_count != vsi->rx_rings[0].count) { +	if (new_rx_count != vsi->rx_rings[0]->count) {  		netdev_info(netdev,  			    "Changing Rx descriptor count from %d to %d\n", -			    vsi->rx_rings[0].count, new_rx_count); +			    vsi->rx_rings[0]->count, new_rx_count);  		rx_rings = kcalloc(vsi->alloc_queue_pairs,  				   sizeof(struct i40e_ring), GFP_KERNEL);  		if (!rx_rings) { @@ -494,7 +592,7 @@ static int i40e_set_ringparam(struct net_device *netdev,  		for (i = 0; i < vsi->num_queue_pairs; i++) {  			/* clone ring and setup updated count */ -			rx_rings[i] = vsi->rx_rings[i]; +			rx_rings[i] = 
*vsi->rx_rings[i];  			rx_rings[i].count = new_rx_count;  			err = i40e_setup_rx_descriptors(&rx_rings[i]);  			if (err) { @@ -517,8 +615,8 @@ static int i40e_set_ringparam(struct net_device *netdev,  	if (tx_rings) {  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			i40e_free_tx_resources(&vsi->tx_rings[i]); -			vsi->tx_rings[i] = tx_rings[i]; +			i40e_free_tx_resources(vsi->tx_rings[i]); +			*vsi->tx_rings[i] = tx_rings[i];  		}  		kfree(tx_rings);  		tx_rings = NULL; @@ -526,8 +624,8 @@ static int i40e_set_ringparam(struct net_device *netdev,  	if (rx_rings) {  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			i40e_free_rx_resources(&vsi->rx_rings[i]); -			vsi->rx_rings[i] = rx_rings[i]; +			i40e_free_rx_resources(vsi->rx_rings[i]); +			*vsi->rx_rings[i] = rx_rings[i];  		}  		kfree(rx_rings);  		rx_rings = NULL; @@ -560,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)  	case ETH_SS_TEST:  		return I40E_TEST_LEN;  	case ETH_SS_STATS: -		if (vsi == pf->vsi[pf->lan_vsi]) -			return I40E_PF_STATS_LEN(netdev); -		else +		if (vsi == pf->vsi[pf->lan_vsi]) { +			int len = I40E_PF_STATS_LEN(netdev); + +			if (pf->lan_veb != I40E_NO_VEB) +				len += I40E_VEB_STATS_LEN; +			return len; +		} else {  			return I40E_VSI_STATS_LEN(netdev); +		}  	default:  		return -EOPNOTSUPP;  	} @@ -573,12 +676,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,  				   struct ethtool_stats *stats, u64 *data)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_ring *tx_ring, *rx_ring;  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back;  	int i = 0;  	char *p;  	int j;  	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); +	unsigned int start;  	i40e_update_stats(vsi); @@ -587,31 +692,63 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,  		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==  			sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p;  	} -	for (j = 0; j < vsi->num_queue_pairs; j++) { -		data[i++] = vsi->tx_rings[j].tx_stats.packets; -		data[i++] = vsi->tx_rings[j].tx_stats.bytes; +	for (j = 0; j < I40E_MISC_STATS_LEN; j++) { +		p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; +		data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == +			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;  	} +	rcu_read_lock();  	for (j = 0; j < vsi->num_queue_pairs; j++) { -		data[i++] = vsi->rx_rings[j].rx_stats.packets; -		data[i++] = vsi->rx_rings[j].rx_stats.bytes; +		tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + +		if (!tx_ring) +			continue; + +		/* process Tx ring statistics */ +		do { +			start = u64_stats_fetch_begin_irq(&tx_ring->syncp); +			data[i] = tx_ring->stats.packets; +			data[i + 1] = tx_ring->stats.bytes; +		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); +		i += 2; + +		/* Rx ring is the 2nd half of the queue pair */ +		rx_ring = &tx_ring[1]; +		do { +			start = u64_stats_fetch_begin_irq(&rx_ring->syncp); +			data[i] = rx_ring->stats.packets; +			data[i + 1] = rx_ring->stats.bytes; +		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); +		i += 2;  	} -	if (vsi == pf->vsi[pf->lan_vsi]) { -		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { -			p = (char *)pf + i40e_gstrings_stats[j].stat_offset; -			data[i++] = (i40e_gstrings_stats[j].sizeof_stat == -				   sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; -		} -		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { -			data[i++] = pf->stats.priority_xon_tx[j]; -			data[i++] = pf->stats.priority_xoff_tx[j]; -		} -		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { -			data[i++] = pf->stats.priority_xon_rx[j]; -			data[i++] = pf->stats.priority_xoff_rx[j]; +	rcu_read_unlock(); +	if (vsi != pf->vsi[pf->lan_vsi]) +		return; + +	if (pf->lan_veb != I40E_NO_VEB) { +		struct i40e_veb *veb = pf->veb[pf->lan_veb]; +		for (j = 0; j < I40E_VEB_STATS_LEN; j++) { +			p = (char *)veb; +			p += i40e_gstrings_veb_stats[j].stat_offset; +			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == +				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;  		} -		for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) -			data[i++] = pf->stats.priority_xon_2_xoff[j];  	} +	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { +		p = (char *)pf + i40e_gstrings_stats[j].stat_offset; +		data[i++] = (i40e_gstrings_stats[j].sizeof_stat == +			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p; +	} +	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { +		data[i++] = pf->stats.priority_xon_tx[j]; +		data[i++] = pf->stats.priority_xoff_tx[j]; +	} +	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { +		data[i++] = pf->stats.priority_xon_rx[j]; +		data[i++] = pf->stats.priority_xoff_rx[j]; +	} +	for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) +		data[i++] = pf->stats.priority_xon_2_xoff[j];  }  static void i40e_get_strings(struct net_device *netdev, u32 stringset, @@ -636,46 +773,57 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,  				 i40e_gstrings_net_stats[i].stat_string);  			p += ETH_GSTRING_LEN;  		} +		for (i = 0; i < I40E_MISC_STATS_LEN; i++) { +			snprintf(p, ETH_GSTRING_LEN, "%s", +				 i40e_gstrings_misc_stats[i].stat_string); +			p += ETH_GSTRING_LEN; +		}  		for (i = 0; i < vsi->num_queue_pairs; i++) {  			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);  			p += ETH_GSTRING_LEN;  			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);  			p 
+= ETH_GSTRING_LEN; -		} -		for (i = 0; i < vsi->num_queue_pairs; i++) {  			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);  			p += ETH_GSTRING_LEN;  			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);  			p += ETH_GSTRING_LEN;  		} -		if (vsi == pf->vsi[pf->lan_vsi]) { -			for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { -				snprintf(p, ETH_GSTRING_LEN, "port.%s", -					 i40e_gstrings_stats[i].stat_string); -				p += ETH_GSTRING_LEN; -			} -			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { -				snprintf(p, ETH_GSTRING_LEN, -					 "port.tx_priority_%u_xon", i); -				p += ETH_GSTRING_LEN; -				snprintf(p, ETH_GSTRING_LEN, -					 "port.tx_priority_%u_xoff", i); -				p += ETH_GSTRING_LEN; -			} -			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { -				snprintf(p, ETH_GSTRING_LEN, -					 "port.rx_priority_%u_xon", i); -				p += ETH_GSTRING_LEN; -				snprintf(p, ETH_GSTRING_LEN, -					 "port.rx_priority_%u_xoff", i); -				p += ETH_GSTRING_LEN; -			} -			for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { -				snprintf(p, ETH_GSTRING_LEN, -					 "port.rx_priority_%u_xon_2_xoff", i); +		if (vsi != pf->vsi[pf->lan_vsi]) +			return; + +		if (pf->lan_veb != I40E_NO_VEB) { +			for (i = 0; i < I40E_VEB_STATS_LEN; i++) { +				snprintf(p, ETH_GSTRING_LEN, "veb.%s", +					i40e_gstrings_veb_stats[i].stat_string);  				p += ETH_GSTRING_LEN;  			}  		} +		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { +			snprintf(p, ETH_GSTRING_LEN, "port.%s", +				 i40e_gstrings_stats[i].stat_string); +			p += ETH_GSTRING_LEN; +		} +		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { +			snprintf(p, ETH_GSTRING_LEN, +				 "port.tx_priority_%u_xon", i); +			p += ETH_GSTRING_LEN; +			snprintf(p, ETH_GSTRING_LEN, +				 "port.tx_priority_%u_xoff", i); +			p += ETH_GSTRING_LEN; +		} +		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { +			snprintf(p, ETH_GSTRING_LEN, +				 "port.rx_priority_%u_xon", i); +			p += ETH_GSTRING_LEN; +			snprintf(p, ETH_GSTRING_LEN, +				 "port.rx_priority_%u_xoff", i); +			p += 
ETH_GSTRING_LEN; +		} +		for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { +			snprintf(p, ETH_GSTRING_LEN, +				 "port.rx_priority_%u_xon_2_xoff", i); +			p += ETH_GSTRING_LEN; +		}  		/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */  		break;  	} @@ -684,11 +832,44 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,  static int i40e_get_ts_info(struct net_device *dev,  			    struct ethtool_ts_info *info)  { -	return ethtool_op_get_ts_info(dev, info); +	struct i40e_pf *pf = i40e_netdev_to_pf(dev); + +	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | +				SOF_TIMESTAMPING_RX_SOFTWARE | +				SOF_TIMESTAMPING_SOFTWARE | +				SOF_TIMESTAMPING_TX_HARDWARE | +				SOF_TIMESTAMPING_RX_HARDWARE | +				SOF_TIMESTAMPING_RAW_HARDWARE; + +	if (pf->ptp_clock) +		info->phc_index = ptp_clock_index(pf->ptp_clock); +	else +		info->phc_index = -1; + +	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + +	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | +			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | +			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | +			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + +	return 0;  } -static int i40e_link_test(struct i40e_pf *pf, u64 *data) +static int i40e_link_test(struct net_device *netdev, u64 *data)  { +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; + +	netif_info(pf, hw, netdev, "link test\n");  	if (i40e_get_link_status(&pf->hw))  		*data = 0;  	else @@ -697,36 +878,51 @@ static int i40e_link_test(struct i40e_pf *pf, u64 *data)  	return *data;  } -static int 
i40e_reg_test(struct i40e_pf *pf, u64 *data) +static int i40e_reg_test(struct net_device *netdev, u64 *data)  { -	i40e_status ret; +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; -	ret = i40e_diag_reg_test(&pf->hw); -	*data = ret; +	netif_info(pf, hw, netdev, "register test\n"); +	*data = i40e_diag_reg_test(&pf->hw); -	return ret; +	return *data;  } -static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data) +static int i40e_eeprom_test(struct net_device *netdev, u64 *data)  { -	i40e_status ret; +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; -	ret = i40e_diag_eeprom_test(&pf->hw); -	*data = ret; +	netif_info(pf, hw, netdev, "eeprom test\n"); +	*data = i40e_diag_eeprom_test(&pf->hw); -	return ret; +	return *data;  } -static int i40e_intr_test(struct i40e_pf *pf, u64 *data) +static int i40e_intr_test(struct net_device *netdev, u64 *data)  { -	*data = -ENOSYS; +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; +	u16 swc_old = pf->sw_int_count; + +	netif_info(pf, hw, netdev, "interrupt test\n"); +	wr32(&pf->hw, I40E_PFINT_DYN_CTL0, +	     (I40E_PFINT_DYN_CTL0_INTENA_MASK | +	      I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); +	usleep_range(1000, 2000); +	*data = (swc_old == pf->sw_int_count);  	return *data;  } -static int i40e_loopback_test(struct i40e_pf *pf, u64 *data) +static int i40e_loopback_test(struct net_device *netdev, u64 *data)  { -	*data = -ENOSYS; +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; + +	netif_info(pf, hw, netdev, "loopback test not implemented\n"); +	*data = 0;  	return *data;  } @@ -737,42 +933,38 @@ static void i40e_diag_test(struct net_device *netdev,  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_pf *pf = np->vsi->back; -	set_bit(__I40E_TESTING, &pf->state);  	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {  		/* Offline tests */ +		netif_info(pf, drv, netdev, 
"offline testing starting\n"); -		netdev_info(netdev, "offline testing starting\n"); +		set_bit(__I40E_TESTING, &pf->state);  		/* Link test performed before hardware reset  		 * so autoneg doesn't interfere with test result  		 */ -		netdev_info(netdev, "link test starting\n"); -		if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK])) +		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))  			eth_test->flags |= ETH_TEST_FL_FAILED; -		netdev_info(netdev, "register test starting\n"); -		if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG])) +		if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM]))  			eth_test->flags |= ETH_TEST_FL_FAILED; -		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); -		netdev_info(netdev, "eeprom test starting\n"); -		if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM])) +		if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))  			eth_test->flags |= ETH_TEST_FL_FAILED; -		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); -		netdev_info(netdev, "interrupt test starting\n"); -		if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR])) +		if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))  			eth_test->flags |= ETH_TEST_FL_FAILED; -		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); -		netdev_info(netdev, "loopback test starting\n"); -		if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK])) +		/* run reg test last, a reset is required after it */ +		if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))  			eth_test->flags |= ETH_TEST_FL_FAILED; +		clear_bit(__I40E_TESTING, &pf->state); +		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));  	} else { -		netdev_info(netdev, "online test starting\n");  		/* Online tests */ -		if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK])) +		netif_info(pf, drv, netdev, "online testing starting\n"); + +		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))  			eth_test->flags |= ETH_TEST_FL_FAILED;  		/* Offline only tests, not run in online; pass by default */ @@ -780,16 +972,53 @@ static 
void i40e_diag_test(struct net_device *netdev,  		data[I40E_ETH_TEST_EEPROM] = 0;  		data[I40E_ETH_TEST_INTR] = 0;  		data[I40E_ETH_TEST_LOOPBACK] = 0; - -		clear_bit(__I40E_TESTING, &pf->state);  	} + +	netif_info(pf, drv, netdev, "testing finished\n");  }  static void i40e_get_wol(struct net_device *netdev,  			 struct ethtool_wolinfo *wol)  { -	wol->supported = 0; -	wol->wolopts = 0; +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; +	struct i40e_hw *hw = &pf->hw; +	u16 wol_nvm_bits; + +	/* NVM bit on means WoL disabled for the port */ +	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); +	if ((1 << hw->port) & wol_nvm_bits) { +		wol->supported = 0; +		wol->wolopts = 0; +	} else { +		wol->supported = WAKE_MAGIC; +		wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); +	} +} + +static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; +	struct i40e_hw *hw = &pf->hw; +	u16 wol_nvm_bits; + +	/* NVM bit on means WoL disabled for the port */ +	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); +	if (((1 << hw->port) & wol_nvm_bits)) +		return -EOPNOTSUPP; + +	/* only magic packet is supported */ +	if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) +		return -EOPNOTSUPP; + +	/* is this a new value? 
*/ +	if (pf->wol_en != !!wol->wolopts) { +		pf->wol_en = !!wol->wolopts; +		device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); +	} + +	return 0;  }  static int i40e_nway_reset(struct net_device *netdev) @@ -823,13 +1052,13 @@ static int i40e_set_phys_id(struct net_device *netdev,  		pf->led_status = i40e_led_get(hw);  		return blink_freq;  	case ETHTOOL_ID_ON: -		i40e_led_set(hw, 0xF); +		i40e_led_set(hw, 0xF, false);  		break;  	case ETHTOOL_ID_OFF: -		i40e_led_set(hw, 0x0); +		i40e_led_set(hw, 0x0, false);  		break;  	case ETHTOOL_ID_INACTIVE: -		i40e_led_set(hw, pf->led_status); +		i40e_led_set(hw, pf->led_status, false);  		break;  	} @@ -851,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,  	ec->rx_max_coalesced_frames_irq = vsi->work_limit;  	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) -		ec->rx_coalesce_usecs = 1; -	else -		ec->rx_coalesce_usecs = vsi->rx_itr_setting; +		ec->use_adaptive_rx_coalesce = 1;  	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) -		ec->tx_coalesce_usecs = 1; -	else -		ec->tx_coalesce_usecs = vsi->tx_itr_setting; +		ec->use_adaptive_tx_coalesce = 1; + +	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; +	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;  	return 0;  } @@ -877,41 +1105,31 @@ static int i40e_set_coalesce(struct net_device *netdev,  	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)  		vsi->work_limit = ec->tx_max_coalesced_frames_irq; -	switch (ec->rx_coalesce_usecs) { -	case 0: -		vsi->rx_itr_setting = 0; -		break; -	case 1: -		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC | -				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); -		break; -	default: -		if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || -		    (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) -			return -EINVAL; +	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && +	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))  		vsi->rx_itr_setting = ec->rx_coalesce_usecs; -		break; -	} +	else +		return 
-EINVAL; -	switch (ec->tx_coalesce_usecs) { -	case 0: -		vsi->tx_itr_setting = 0; -		break; -	case 1: -		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC | -				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); -		break; -	default: -		if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || -		    (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) -			return -EINVAL; +	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && +	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))  		vsi->tx_itr_setting = ec->tx_coalesce_usecs; -		break; -	} +	else +		return -EINVAL; + +	if (ec->use_adaptive_rx_coalesce) +		vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; +	else +		vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + +	if (ec->use_adaptive_tx_coalesce) +		vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; +	else +		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;  	vector = vsi->base_vector; -	q_vector = vsi->q_vectors; -	for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) { +	for (i = 0; i < vsi->num_q_vectors; i++, vector++) { +		q_vector = vsi->q_vectors[i];  		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);  		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);  		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); @@ -965,6 +1183,92 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)  }  /** + * i40e_get_ethtool_fdir_all - Populates the rule count of a command + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * @rule_locs: Array of used rule locations + * + * This function populates both the total and actual rule count of + * the ethtool flow classification command + * + * Returns 0 on success or -EMSGSIZE if entry not found + **/ +static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, +				     struct ethtool_rxnfc *cmd, +				     u32 *rule_locs) +{ +	struct i40e_fdir_filter *rule; +	struct hlist_node *node2; +	int cnt = 0; + +	/* report total rule count */ +	cmd->data = i40e_get_fd_cnt_all(pf); + +	
hlist_for_each_entry_safe(rule, node2, +				  &pf->fdir_filter_list, fdir_node) { +		if (cnt == cmd->rule_cnt) +			return -EMSGSIZE; + +		rule_locs[cnt] = rule->fd_id; +		cnt++; +	} + +	cmd->rule_cnt = cnt; + +	return 0; +} + +/** + * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow + * @pf: Pointer to the physical function struct + * @cmd: The command to get or set Rx flow classification rules + * + * This function looks up a filter based on the Rx flow classification + * command and fills the flow spec info for it if found + * + * Returns 0 on success or -EINVAL if filter not found + **/ +static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, +				       struct ethtool_rxnfc *cmd) +{ +	struct ethtool_rx_flow_spec *fsp = +			(struct ethtool_rx_flow_spec *)&cmd->fs; +	struct i40e_fdir_filter *rule = NULL; +	struct hlist_node *node2; + +	hlist_for_each_entry_safe(rule, node2, +				  &pf->fdir_filter_list, fdir_node) { +		if (fsp->location <= rule->fd_id) +			break; +	} + +	if (!rule || fsp->location != rule->fd_id) +		return -EINVAL; + +	fsp->flow_type = rule->flow_type; +	if (fsp->flow_type == IP_USER_FLOW) { +		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; +		fsp->h_u.usr_ip4_spec.proto = 0; +		fsp->m_u.usr_ip4_spec.proto = 0; +	} + +	/* Reverse the src and dest notion, since the HW views them from +	 * Tx perspective where as the user expects it from Rx filter view. 
+	 */ +	fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; +	fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; +	fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0]; +	fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0]; + +	if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) +		fsp->ring_cookie = RX_CLS_FLOW_DISC; +	else +		fsp->ring_cookie = rule->q_index; + +	return 0; +} + +/**   * i40e_get_rxnfc - command to get RX flow classification rules   * @netdev: network interface device structure   * @cmd: ethtool rxnfc command @@ -988,14 +1292,17 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,  		ret = i40e_get_rss_hash_opts(pf, cmd);  		break;  	case ETHTOOL_GRXCLSRLCNT: +		cmd->rule_cnt = pf->fdir_pf_active_filters; +		/* report total rule count */ +		cmd->data = i40e_get_fd_cnt_all(pf);  		ret = 0;  		break;  	case ETHTOOL_GRXCLSRULE: -		ret = 0; +		ret = i40e_get_ethtool_fdir_entry(pf, cmd);  		break;  	case ETHTOOL_GRXCLSRLALL: -		cmd->data = 500; -		ret = 0; +		ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); +		break;  	default:  		break;  	} @@ -1056,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)  	case UDP_V4_FLOW:  		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {  		case 0: -			hena &= -			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | -			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | -			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); +			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | +				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));  			break;  		case (RXH_L4_B_0_1 | RXH_L4_B_2_3): -			hena |= -			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)  | -			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | -			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)); +			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | +				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));  			break;  		default:  			return -EINVAL; @@ -1074,16 +1377,12 @@ static int 
i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)  	case UDP_V6_FLOW:  		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {  		case 0: -			hena &= -			~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | -			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | -			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); +			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | +				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));  			break;  		case (RXH_L4_B_0_1 | RXH_L4_B_2_3): -			hena |= -			(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)  | -			((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | -			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6)); +			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | +				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));  			break;  		default:  			return -EINVAL; @@ -1126,265 +1425,191 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)  	return 0;  } -#define IP_HEADER_OFFSET 14  /** - * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it + * i40e_match_fdir_input_set - Match a new filter against an existing one + * @rule: The filter already added + * @input: The new filter to comapre against   * - * Returns 0 if the filters were successfully added or removed + * Returns true if the two input set match   **/ -static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, -				   struct i40e_fdir_data *fd_data, -				   struct ethtool_rx_flow_spec *fsp, bool add) +static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule, +				      struct i40e_fdir_filter *input)  { -	struct i40e_pf *pf = vsi->back; -	struct udphdr *udp; -	struct iphdr *ip; -	bool err = false; -	int ret; -	int i; - -	ip = (struct iphdr *)(fd_data->raw_packet + 
IP_HEADER_OFFSET); -	udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET -	      + sizeof(struct iphdr)); - -	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src; -	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; -	udp->source = fsp->h_u.tcp_ip4_spec.psrc; -	udp->dest = fsp->h_u.tcp_ip4_spec.pdst; - -	for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP; -	     i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) { -		fd_data->pctype = i; -		ret = i40e_program_fdir_filter(fd_data, pf, add); - -		if (ret) { -			dev_info(&pf->pdev->dev, -				 "Filter command send failed for PCTYPE %d (ret = %d)\n", -				 fd_data->pctype, ret); -			err = true; -		} else { -			dev_info(&pf->pdev->dev, -				 "Filter OK for PCTYPE %d (ret = %d)\n", -				 fd_data->pctype, ret); -		} -	} - -	return err ? -EOPNOTSUPP : 0; +	if ((rule->dst_ip[0] != input->dst_ip[0]) || +	    (rule->src_ip[0] != input->src_ip[0]) || +	    (rule->dst_port != input->dst_port) || +	    (rule->src_port != input->src_port)) +		return false; +	return true;  }  /** - * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it + * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry + * @vsi: Pointer to the targeted VSI + * @input: The filter to update or NULL to indicate deletion + * @sw_idx: Software index to the filter + * @cmd: The command to get or set Rx flow classification rules + * + * This function updates (or deletes) a Flow Director entry from + * the hlist of the corresponding PF   * - * Returns 0 if the filters were successfully added or removed + * Returns 0 on success   **/ -static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, -				   struct i40e_fdir_data *fd_data, -				   struct ethtool_rx_flow_spec *fsp, bool add) +static int i40e_update_ethtool_fdir_entry(struct i40e_vsi 
*vsi, +					  struct i40e_fdir_filter *input, +					  u16 sw_idx, +					  struct ethtool_rxnfc *cmd)  { +	struct i40e_fdir_filter *rule, *parent;  	struct i40e_pf *pf = vsi->back; -	struct tcphdr *tcp; -	struct iphdr *ip; -	bool err = false; -	int ret; +	struct hlist_node *node2; +	int err = -EINVAL; -	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); -	tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET -	      + sizeof(struct iphdr)); +	parent = NULL; +	rule = NULL; -	ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; -	tcp->dest = fsp->h_u.tcp_ip4_spec.pdst; - -	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; -	ret = i40e_program_fdir_filter(fd_data, pf, add); +	hlist_for_each_entry_safe(rule, node2, +				  &pf->fdir_filter_list, fdir_node) { +		/* hash found, or no matching entry */ +		if (rule->fd_id >= sw_idx) +			break; +		parent = rule; +	} -	if (ret) { -		dev_info(&pf->pdev->dev, -			 "Filter command send failed for PCTYPE %d (ret = %d)\n", -			 fd_data->pctype, ret); -		err = true; -	} else { -		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", -			 fd_data->pctype, ret); +	/* if there is an old rule occupying our place remove it */ +	if (rule && (rule->fd_id == sw_idx)) { +		if (input && !i40e_match_fdir_input_set(rule, input)) +			err = i40e_add_del_fdir(vsi, rule, false); +		else if (!input) +			err = i40e_add_del_fdir(vsi, rule, false); +		hlist_del(&rule->fdir_node); +		kfree(rule); +		pf->fdir_pf_active_filters--;  	} -	ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src; -	tcp->source = fsp->h_u.tcp_ip4_spec.psrc; +	/* If no input this was a delete, err should be 0 if a rule was +	 * successfully found and removed from the list else -EINVAL +	 */ +	if (!input) +		return err; -	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; +	/* initialize node and set software index */ +	INIT_HLIST_NODE(&input->fdir_node); -	ret = i40e_program_fdir_filter(fd_data, pf, add); -	if (ret) { -		dev_info(&pf->pdev->dev, -			 "Filter 
command send failed for PCTYPE %d (ret = %d)\n", -			 fd_data->pctype, ret); -		err = true; -	} else { -		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", -			  fd_data->pctype, ret); -	} +	/* add filter to the list */ +	if (parent) +		hlist_add_after(&parent->fdir_node, &input->fdir_node); +	else +		hlist_add_head(&input->fdir_node, +			       &pf->fdir_filter_list); -	return err ? -EOPNOTSUPP : 0; -} +	/* update counts */ +	pf->fdir_pf_active_filters++; -/** - * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required from the FDir descriptor - * @ethtool_rx_flow_spec: the flow spec - * @add: true adds a filter, false removes it - * - * Returns 0 if the filters were successfully added or removed - **/ -static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, -				    struct i40e_fdir_data *fd_data, -				    struct ethtool_rx_flow_spec *fsp, bool add) -{ -	return -EOPNOTSUPP; +	return 0;  }  /** - * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for - * a specific flow spec - * @vsi: pointer to the targeted VSI - * @fd_data: the flow director data required for the FDir descriptor - * @fsp: the ethtool flow spec - * @add: true adds a filter, false removes it + * i40e_del_fdir_entry - Deletes a Flow Director filter entry + * @vsi: Pointer to the targeted VSI + * @cmd: The command to get or set Rx flow classification rules   * - * Returns 0 if the filters were successfully added or removed - **/ -static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, -				  struct i40e_fdir_data *fd_data, -				  struct ethtool_rx_flow_spec *fsp, bool add) + * The function removes a Flow Director filter entry from the + * hlist of the corresponding PF + * + * Returns 0 on success + */ +static int i40e_del_fdir_entry(struct i40e_vsi *vsi, +			       struct ethtool_rxnfc *cmd)  { +	struct ethtool_rx_flow_spec *fsp = +		(struct 
ethtool_rx_flow_spec *)&cmd->fs;  	struct i40e_pf *pf = vsi->back; -	struct iphdr *ip; -	bool err = false; -	int ret; -	int i; +	int ret = 0; -	ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET); +	ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); -	ip->saddr = fsp->h_u.usr_ip4_spec.ip4src; -	ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst; -	ip->protocol = fsp->h_u.usr_ip4_spec.proto; - -	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; -	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) { -		fd_data->pctype = i; -		ret = i40e_program_fdir_filter(fd_data, pf, add); - -		if (ret) { -			dev_info(&pf->pdev->dev, -				 "Filter command send failed for PCTYPE %d (ret = %d)\n", -				 fd_data->pctype, ret); -			err = true; -		} else { -			dev_info(&pf->pdev->dev, -				 "Filter OK for PCTYPE %d (ret = %d)\n", -				 fd_data->pctype, ret); -		} -	} - -	return err ? -EOPNOTSUPP : 0; +	i40e_fdir_check_and_reenable(pf); +	return ret;  }  /** - * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for - * a specific flow spec based on their protocol + * i40e_add_fdir_ethtool - Add/Remove Flow Director filters   * @vsi: pointer to the targeted VSI   * @cmd: command to get or set RX flow classification rules - * @add: true adds a filter, false removes it   * - * Returns 0 if the filters were successfully added or removed + * Add Flow Director filters for a specific flow spec based on their + * protocol.  Returns 0 if the filters were successfully added.   
**/ -static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi, -			struct ethtool_rxnfc *cmd, bool add) +static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, +				 struct ethtool_rxnfc *cmd)  { -	struct i40e_fdir_data fd_data; -	int ret = -EINVAL; +	struct ethtool_rx_flow_spec *fsp; +	struct i40e_fdir_filter *input;  	struct i40e_pf *pf; -	struct ethtool_rx_flow_spec *fsp = -		(struct ethtool_rx_flow_spec *)&cmd->fs; +	int ret = -EINVAL;  	if (!vsi)  		return -EINVAL;  	pf = vsi->back; +	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) +		return -EOPNOTSUPP; + +	if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) +		return -ENOSPC; + +	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + +	if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + +			      pf->hw.func_caps.fd_filters_guaranteed)) { +		return -EINVAL; +	} +  	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&  	    (fsp->ring_cookie >= vsi->num_queue_pairs))  		return -EINVAL; -	/* Populate the Flow Director that we have at the moment -	 * and allocate the raw packet buffer for the calling functions -	 */ -	fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, -				     GFP_KERNEL); +	input = kzalloc(sizeof(*input), GFP_KERNEL); -	if (!fd_data.raw_packet) { -		dev_info(&pf->pdev->dev, "Could not allocate memory\n"); +	if (!input)  		return -ENOMEM; -	} -	fd_data.q_index = fsp->ring_cookie; -	fd_data.flex_off = 0; -	fd_data.pctype = 0; -	fd_data.dest_vsi = vsi->id; -	fd_data.dest_ctl = 0; -	fd_data.fd_status = 0; -	fd_data.cnt_index = 0; -	fd_data.fd_id = 0; - -	switch (fsp->flow_type & ~FLOW_EXT) { -	case TCP_V4_FLOW: -		ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); -		break; -	case UDP_V4_FLOW: -		ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); -		break; -	case SCTP_V4_FLOW: -		ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); -		break; -	case IPV4_FLOW: -		ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); -		break; -	case IP_USER_FLOW: -		switch 
(fsp->h_u.usr_ip4_spec.proto) { -		case IPPROTO_TCP: -			ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); -			break; -		case IPPROTO_UDP: -			ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); -			break; -		case IPPROTO_SCTP: -			ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); -			break; -		default: -			ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); -			break; -		} -		break; -	default: -		dev_info(&pf->pdev->dev, "Could not specify spec type\n"); -		ret = -EINVAL; -	} +	input->fd_id = fsp->location; -	kfree(fd_data.raw_packet); -	fd_data.raw_packet = NULL; +	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) +		input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; +	else +		input->dest_ctl = +			     I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX; + +	input->q_index = fsp->ring_cookie; +	input->flex_off = 0; +	input->pctype = 0; +	input->dest_vsi = vsi->id; +	input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; +	input->cnt_index  = pf->fd_sb_cnt_idx; +	input->flow_type = fsp->flow_type; +	input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; + +	/* Reverse the src and dest notion, since the HW expects them to be from +	 * Tx perspective where as the input from user is from Rx filter view. 
+	 */ +	input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; +	input->src_port = fsp->h_u.tcp_ip4_spec.pdst; +	input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; +	input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + +	ret = i40e_add_del_fdir(vsi, input, true); +	if (ret) +		kfree(input); +	else +		i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);  	return ret;  } +  /**   * i40e_set_rxnfc - command to set RX flow classification rules   * @netdev: network interface device structure @@ -1404,10 +1629,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)  		ret = i40e_set_rss_hash_opt(pf, cmd);  		break;  	case ETHTOOL_SRXCLSRLINS: -		ret = i40e_add_del_fdir_ethtool(vsi, cmd, true); +		ret = i40e_add_fdir_ethtool(vsi, cmd);  		break;  	case ETHTOOL_SRXCLSRLDEL: -		ret = i40e_add_del_fdir_ethtool(vsi, cmd, false); +		ret = i40e_del_fdir_entry(vsi, cmd);  		break;  	default:  		break; @@ -1416,6 +1641,94 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)  	return ret;  } +/** + * i40e_max_channels - get Max number of combined channels supported + * @vsi: vsi pointer + **/ +static unsigned int i40e_max_channels(struct i40e_vsi *vsi) +{ +	/* TODO: This code assumes DCB and FD is disabled for now. */ +	return vsi->alloc_queue_pairs; +} + +/** + * i40e_get_channels - Get the current channels enabled and max supported etc. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * We don't support separate tx and rx queues as channels. The other count + * represents how many queues are being used for control. max_combined counts + * how many queue pairs we can support. They may not be mapped 1 to 1 with + * q_vectors since we support a lot more queue pairs than q_vectors. 
+ **/ +static void i40e_get_channels(struct net_device *dev, +			       struct ethtool_channels *ch) +{ +	struct i40e_netdev_priv *np = netdev_priv(dev); +	struct i40e_vsi *vsi = np->vsi; +	struct i40e_pf *pf = vsi->back; + +	/* report maximum channels */ +	ch->max_combined = i40e_max_channels(vsi); + +	/* report info for other vector */ +	ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; +	ch->max_other = ch->other_count; + +	/* Note: This code assumes DCB is disabled for now. */ +	ch->combined_count = vsi->num_queue_pairs; +} + +/** + * i40e_set_channels - Set the new channels count. + * @netdev: network interface device structure + * @ch: ethtool channels structure + * + * The new channels count may not be the same as requested by the user + * since it gets rounded down to a power of 2 value. + **/ +static int i40e_set_channels(struct net_device *dev, +			      struct ethtool_channels *ch) +{ +	struct i40e_netdev_priv *np = netdev_priv(dev); +	unsigned int count = ch->combined_count; +	struct i40e_vsi *vsi = np->vsi; +	struct i40e_pf *pf = vsi->back; +	int new_count; + +	/* We do not support setting channels for any other VSI at present */ +	if (vsi->type != I40E_VSI_MAIN) +		return -EINVAL; + +	/* verify they are not requesting separate vectors */ +	if (!count || ch->rx_count || ch->tx_count) +		return -EINVAL; + +	/* verify other_count has not changed */ +	if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 
1 : 0)) +		return -EINVAL; + +	/* verify the number of channels does not exceed hardware limits */ +	if (count > i40e_max_channels(vsi)) +		return -EINVAL; + +	/* update feature limits from largest to smallest supported values */ +	/* TODO: Flow director limit, DCB etc */ + +	/* cap RSS limit */ +	if (count > pf->rss_size_max) +		count = pf->rss_size_max; + +	/* use rss_reconfig to rebuild with new queue count and update traffic +	 * class queue mapping +	 */ +	new_count = i40e_reconfig_rss_queues(pf, count); +	if (new_count > 0) +		return 0; +	else +		return -EINVAL; +} +  static const struct ethtool_ops i40e_ethtool_ops = {  	.get_settings		= i40e_get_settings,  	.get_drvinfo		= i40e_get_drvinfo, @@ -1424,6 +1737,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {  	.nway_reset		= i40e_nway_reset,  	.get_link		= ethtool_op_get_link,  	.get_wol		= i40e_get_wol, +	.set_wol		= i40e_set_wol,  	.get_eeprom_len		= i40e_get_eeprom_len,  	.get_eeprom		= i40e_get_eeprom,  	.get_ringparam		= i40e_get_ringparam, @@ -1440,10 +1754,12 @@ static const struct ethtool_ops i40e_ethtool_ops = {  	.get_ethtool_stats	= i40e_get_ethtool_stats,  	.get_coalesce		= i40e_get_coalesce,  	.set_coalesce		= i40e_set_coalesce, +	.get_channels		= i40e_get_channels, +	.set_channels		= i40e_set_channels,  	.get_ts_info		= i40e_get_ts_info,  };  void i40e_set_ethtool_ops(struct net_device *netdev)  { -	SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops); +	netdev->ethtool_ops = &i40e_ethtool_ops;  } diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 901804af8b0..9b987ccc9e8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   
*   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -47,10 +46,10 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,  					      u64 direct_mode_sz)  {  	enum i40e_memory_type mem_type __attribute__((unused)); -	i40e_status ret_code = 0;  	struct i40e_hmc_sd_entry *sd_entry;  	bool dma_mem_alloc_done = false;  	struct i40e_dma_mem mem; +	i40e_status ret_code;  	u64 alloc_len;  	if (NULL == hmc_info->sd_table.sd_entry) { @@ -90,11 +89,9 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,  			sd_entry->u.pd_table.pd_entry =  				(struct i40e_hmc_pd_entry *)  				sd_entry->u.pd_table.pd_entry_virt_mem.va; -			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, -			       sizeof(struct i40e_dma_mem)); +			sd_entry->u.pd_table.pd_page_addr = mem;  		} else { -			memcpy(&sd_entry->u.bp.addr, &mem, -			       sizeof(struct i40e_dma_mem)); +			sd_entry->u.bp.addr = mem;  			sd_entry->u.bp.sd_pd_index = sd_index;  		}  		/* initialize the sd entry */ @@ -165,7 +162,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,  		if (ret_code)  			goto exit; -		memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem)); +		pd_entry->bp.addr = mem;  		pd_entry->bp.sd_pd_index = pd_index;  		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;  		/* Set page address and valid bit */ @@ -204,7 
+201,7 @@ exit:   **/  i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,  					struct i40e_hmc_info *hmc_info, -					u32 idx, bool is_pf) +					u32 idx)  {  	i40e_status ret_code = 0;  	struct i40e_hmc_pd_entry *pd_entry; @@ -240,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,  	pd_addr = (u64 *)pd_table->pd_page_addr.va;  	pd_addr += rel_pd_idx;  	memset(pd_addr, 0, sizeof(u64)); -	if (is_pf) -		I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); -	else -		I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id); +	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);  	/* free memory here */  	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr)); diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index aacd42a261e..b45d8fedc5e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -117,7 +116,6 @@ struct i40e_hmc_info {   * @hw: pointer to our hw struct   * @pa: pointer to physical address   * @sd_index: segment descriptor index - * @hmc_fn_id: hmc function id   * @type: if sd entry is direct or paged   **/  #define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\ @@ -139,7 +137,6 @@ struct i40e_hmc_info {   * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware   * @hw: pointer to our hw struct   * @sd_index: segment descriptor index - * @hmc_fn_id: hmc function id   * @type: if sd entry is direct or paged   **/  #define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\ @@ -160,18 +157,12 @@ struct i40e_hmc_info {   * @hw: pointer to our hw struct   * @sd_idx: segment descriptor index   * @pd_idx: page descriptor index - * @hmc_fn_id: hmc function id   **/  #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)			\  	wr32((hw), I40E_PFHMC_PDINV,					\  	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		\  	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) -#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id)	   \ -	wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \ -	     (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		   \ -	      ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) -  /**   * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit   * @hmc_info: pointer to the HMC configuration information structure @@ -230,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,  					      u32 pd_index);  i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,  					struct i40e_hmc_info *hmc_info, -					u32 idx, bool is_pf); +					u32 idx);  i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,  					     u32 idx);  i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index a695b91c9c7..870ab1ee072 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c 
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -398,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,  				/* remove the backing pages from pd_idx1 to i */  				while (i && (i > pd_idx1)) {  					i40e_remove_pd_bp(hw, info->hmc_info, -							  (i - 1), true); +							  (i - 1));  					i--;  				}  			} @@ -434,11 +433,7 @@ exit_sd_error:  				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));  			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));  			for (i = pd_idx1; i < pd_lmt1; i++) { -				i40e_remove_pd_bp( -					hw, -					info->hmc_info, -					i, -					true); +				i40e_remove_pd_bp(hw, info->hmc_info, i);  			}  			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));  			break; @@ -486,8 +481,7 @@ i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,  		/* Make one big object, a single SD */  		info.count = 1;  		ret_code = i40e_create_lan_hmc_object(hw, &info); -		if ((ret_code) && -		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED)) +		if (ret_code && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))  			goto try_type_paged;  		else if (ret_code)  			goto configure_lan_hmc_out; @@ -618,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,  		pd_table =  			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;  		if (pd_table->pd_entry[rel_pd_idx].valid) { -			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, -						     j, true); +			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);  			if (ret_code)  				goto exit;  		} @@ -749,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {  	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },  	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },  	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 }, +	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },  	{ 0 }  }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index 00ff3500607..eb65fe23c4a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ 
b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -57,6 +56,7 @@ struct i40e_hmc_obj_rxq {  	u8  tphdata_ena;  	u8  tphhead_ena;  	u8  lrxqthresh; +	u8  prefena;	/* NOTE: normally must be set to 1 at init */  };  /* Tx queue context data */ @@ -113,8 +113,8 @@ enum i40e_hmc_lan_object_size {  #define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512  #define I40E_HMC_OBJ_SIZE_TXQ         128  #define I40E_HMC_OBJ_SIZE_RXQ         32 -#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   128 -#define I40E_HMC_OBJ_SIZE_FCOE_FILT   32 +#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   64 +#define I40E_HMC_OBJ_SIZE_FCOE_FILT   64  enum i40e_hmc_lan_rsrc_type {  	I40E_HMC_LAN_FULL  = 0, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 221aa479501..275ca9a1719 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -27,6 +26,10 @@  /* Local includes */  #include "i40e.h" +#include "i40e_diag.h" +#ifdef CONFIG_I40E_VXLAN +#include <net/vxlan.h> +#endif  const char i40e_driver_name[] = "i40e";  static const char i40e_driver_string[] = @@ -35,23 +38,25 @@ static const char i40e_driver_string[] =  #define DRV_KERN "-k"  #define DRV_VERSION_MAJOR 0 -#define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 9 +#define DRV_VERSION_MINOR 4 +#define DRV_VERSION_BUILD 10  #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \  	     __stringify(DRV_VERSION_MINOR) "." \  	     __stringify(DRV_VERSION_BUILD)    DRV_KERN  const char i40e_driver_version_str[] = DRV_VERSION; -static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation."; +static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";  /* a bit of forward declarations */  static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);  static void i40e_handle_reset_warning(struct i40e_pf *pf);  static int i40e_add_vsi(struct i40e_vsi *vsi);  static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi); -static int i40e_setup_pf_switch(struct i40e_pf *pf); +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);  static int i40e_setup_misc_vector(struct i40e_pf *pf);  static void i40e_determine_queue_usage(struct i40e_pf *pf);  static int i40e_setup_pf_filter_control(struct i40e_pf *pf); +static void i40e_fdir_sb_setup(struct i40e_pf *pf); +static int i40e_veb_get_bw_info(struct i40e_veb *veb);  /* i40e_pci_tbl - PCI Device ID Table   * @@ -61,16 +66,14 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);   *   Class, Class Mask, private data (not used) }   */  static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = { -	{PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0}, -	
{PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0}, -	{PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, +	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},  	/* required last entry */  	{0, }  }; @@ -301,6 +304,7 @@ static void i40e_tx_timeout(struct net_device *netdev)  		break;  	default:  		netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); +		set_bit(__I40E_DOWN, &vsi->state);  		i40e_down(vsi);  		break;  	} @@ -347,14 +351,59 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)   **/  static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(  					     struct net_device *netdev, -					     struct rtnl_link_stats64 *storage) +					     struct rtnl_link_stats64 *stats)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_ring *tx_ring, *rx_ring;  	struct i40e_vsi *vsi = np->vsi; +	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); +	int i; + +	if (test_bit(__I40E_DOWN, &vsi->state)) +		return stats; + +	if (!vsi->tx_rings) +		return stats; + +	rcu_read_lock(); +	for (i = 0; i < vsi->num_queue_pairs; i++) { +		u64 bytes, packets; +		unsigned int start; + +		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); +		if (!tx_ring) +			continue; -	*storage = *i40e_get_vsi_stats_struct(vsi); +		do { +			start = u64_stats_fetch_begin_irq(&tx_ring->syncp); +			packets = tx_ring->stats.packets; +			bytes   = tx_ring->stats.bytes; +		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); + +		stats->tx_packets += packets; +		stats->tx_bytes   += bytes; +		
rx_ring = &tx_ring[1]; + +		do { +			start = u64_stats_fetch_begin_irq(&rx_ring->syncp); +			packets = rx_ring->stats.packets; +			bytes   = rx_ring->stats.bytes; +		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + +		stats->rx_packets += packets; +		stats->rx_bytes   += bytes; +	} +	rcu_read_unlock(); + +	/* following stats updated by i40e_watchdog_subtask() */ +	stats->multicast	= vsi_stats->multicast; +	stats->tx_errors	= vsi_stats->tx_errors; +	stats->tx_dropped	= vsi_stats->tx_dropped; +	stats->rx_errors	= vsi_stats->rx_errors; +	stats->rx_crc_errors	= vsi_stats->rx_crc_errors; +	stats->rx_length_errors	= vsi_stats->rx_length_errors; -	return storage; +	return stats;  }  /** @@ -374,13 +423,18 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)  	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));  	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));  	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets)); -	if (vsi->rx_rings) +	if (vsi->rx_rings && vsi->rx_rings[0]) {  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			memset(&vsi->rx_rings[i].rx_stats, 0 , -			       sizeof(vsi->rx_rings[i].rx_stats)); -			memset(&vsi->tx_rings[i].tx_stats, 0, -			       sizeof(vsi->tx_rings[i].tx_stats)); +			memset(&vsi->rx_rings[i]->stats, 0 , +			       sizeof(vsi->rx_rings[i]->stats)); +			memset(&vsi->rx_rings[i]->rx_stats, 0 , +			       sizeof(vsi->rx_rings[i]->rx_stats)); +			memset(&vsi->tx_rings[i]->stats, 0 , +			       sizeof(vsi->tx_rings[i]->stats)); +			memset(&vsi->tx_rings[i]->tx_stats, 0, +			       sizeof(vsi->tx_rings[i]->tx_stats));  		} +	}  	vsi->stat_offsets_loaded = false;  } @@ -415,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,  {  	u64 new_data; -	if (hw->device_id == I40E_QEMU_DEVICE_ID) { +	if (hw->device_id == I40E_DEV_ID_QEMU) {  		new_data = rd32(hw, loreg);  		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;  	} else { @@ -474,6 +528,12 @@ void 
i40e_update_eth_stats(struct i40e_vsi *vsi)  	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),  			   vsi->stat_offsets_loaded,  			   &oes->rx_discards, &es->rx_discards); +	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx), +			   vsi->stat_offsets_loaded, +			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol); +	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), +			   vsi->stat_offsets_loaded, +			   &oes->tx_errors, &es->tx_errors);  	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),  			   I40E_GLV_GORCL(stat_idx), @@ -531,10 +591,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)  	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),  			   veb->stat_offsets_loaded,  			   &oes->tx_discards, &es->tx_discards); -	i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), -			   veb->stat_offsets_loaded, -			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol); - +	if (hw->revision_id > 0) +		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx), +				   veb->stat_offsets_loaded, +				   &oes->rx_unknown_protocol, +				   &es->rx_unknown_protocol);  	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),  			   veb->stat_offsets_loaded,  			   &oes->rx_bytes, &es->rx_bytes); @@ -591,14 +652,14 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)  		return;  	/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */ -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		struct i40e_vsi *vsi = pf->vsi[v]; -		if (!vsi) +		if (!vsi || !vsi->tx_rings[0])  			continue;  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			struct i40e_ring *ring = &vsi->tx_rings[i]; +			struct i40e_ring *ring = vsi->tx_rings[i];  			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);  		}  	} @@ -645,14 +706,14 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)  	}  	/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */ -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		struct i40e_vsi 
*vsi = pf->vsi[v]; -		if (!vsi) +		if (!vsi || !vsi->tx_rings[0])  			continue;  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			struct i40e_ring *ring = &vsi->tx_rings[i]; +			struct i40e_ring *ring = vsi->tx_rings[i];  			tc = ring->dcb_tc;  			if (xoff[tc]) @@ -663,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)  }  /** - * i40e_update_stats - Update the board statistics counters. + * i40e_update_vsi_stats - Update the vsi statistics counters.   * @vsi: the VSI to be updated   *   * There are a few instances where we store the same stat in a   * couple of different structs.  This is partly because we have   * the netdev stats that need to be filled out, which is slightly   * different from the "eth_stats" defined by the chip and used in - * VF communications.  We sort it all out here in a central place. + * VF communications.  We sort it out here.   **/ -void i40e_update_stats(struct i40e_vsi *vsi) +static void i40e_update_vsi_stats(struct i40e_vsi *vsi)  {  	struct i40e_pf *pf = vsi->back; -	struct i40e_hw *hw = &pf->hw;  	struct rtnl_link_stats64 *ons;  	struct rtnl_link_stats64 *ns;   /* netdev stats */  	struct i40e_eth_stats *oes; @@ -684,7 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)  	u32 rx_page, rx_buf;  	u64 rx_p, rx_b;  	u64 tx_p, tx_b; -	int i;  	u16 q;  	if (test_bit(__I40E_DOWN, &vsi->state) || @@ -704,21 +763,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)  	tx_restart = tx_busy = 0;  	rx_page = 0;  	rx_buf = 0; +	rcu_read_lock();  	for (q = 0; q < vsi->num_queue_pairs; q++) {  		struct i40e_ring *p; +		u64 bytes, packets; +		unsigned int start; -		p = &vsi->rx_rings[q]; -		rx_b += p->rx_stats.bytes; -		rx_p += p->rx_stats.packets; -		rx_buf += p->rx_stats.alloc_rx_buff_failed; -		rx_page += p->rx_stats.alloc_rx_page_failed; +		/* locate Tx ring */ +		p = ACCESS_ONCE(vsi->tx_rings[q]); -		p = &vsi->tx_rings[q]; -		tx_b += p->tx_stats.bytes; -		tx_p += p->tx_stats.packets; +		do { +			start = 
u64_stats_fetch_begin_irq(&p->syncp); +			packets = p->stats.packets; +			bytes = p->stats.bytes; +		} while (u64_stats_fetch_retry_irq(&p->syncp, start)); +		tx_b += bytes; +		tx_p += packets;  		tx_restart += p->tx_stats.restart_queue;  		tx_busy += p->tx_stats.tx_busy; -	} + +		/* Rx queue is part of the same block as Tx queue */ +		p = &p[1]; +		do { +			start = u64_stats_fetch_begin_irq(&p->syncp); +			packets = p->stats.packets; +			bytes = p->stats.bytes; +		} while (u64_stats_fetch_retry_irq(&p->syncp, start)); +		rx_b += bytes; +		rx_p += packets; +		rx_buf += p->rx_stats.alloc_buff_failed; +		rx_page += p->rx_stats.alloc_page_failed; +	} +	rcu_read_unlock();  	vsi->tx_restart = tx_restart;  	vsi->tx_busy = tx_busy;  	vsi->rx_page_failed = rx_page; @@ -729,182 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)  	ns->tx_packets = tx_p;  	ns->tx_bytes = tx_b; -	i40e_update_eth_stats(vsi);  	/* update netdev stats from eth stats */ -	ons->rx_errors = oes->rx_errors; -	ns->rx_errors = es->rx_errors; +	i40e_update_eth_stats(vsi);  	ons->tx_errors = oes->tx_errors;  	ns->tx_errors = es->tx_errors;  	ons->multicast = oes->rx_multicast;  	ns->multicast = es->rx_multicast; +	ons->rx_dropped = oes->rx_discards; +	ns->rx_dropped = es->rx_discards;  	ons->tx_dropped = oes->tx_discards;  	ns->tx_dropped = es->tx_discards; -	/* Get the port data only if this is the main PF VSI */ +	/* pull in a couple PF stats if this is the main vsi */  	if (vsi == pf->vsi[pf->lan_vsi]) { -		struct i40e_hw_port_stats *nsd = &pf->stats; -		struct i40e_hw_port_stats *osd = &pf->stats_offsets; +		ns->rx_crc_errors = pf->stats.crc_errors; +		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; +		ns->rx_length_errors = pf->stats.rx_length_errors; +	} +} -		i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), -				   I40E_GLPRT_GORCL(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->eth.rx_bytes, &nsd->eth.rx_bytes); -		i40e_stat_update48(hw, 
I40E_GLPRT_GOTCH(hw->port), -				   I40E_GLPRT_GOTCL(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->eth.tx_bytes, &nsd->eth.tx_bytes); -		i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->eth.rx_discards, -				   &nsd->eth.rx_discards); -		i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->eth.tx_discards, -				   &nsd->eth.tx_discards); -		i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), -				   I40E_GLPRT_MPRCL(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->eth.rx_multicast, -				   &nsd->eth.rx_multicast); +/** + * i40e_update_pf_stats - Update the pf statistics counters. + * @pf: the PF to be updated + **/ +static void i40e_update_pf_stats(struct i40e_pf *pf) +{ +	struct i40e_hw_port_stats *osd = &pf->stats_offsets; +	struct i40e_hw_port_stats *nsd = &pf->stats; +	struct i40e_hw *hw = &pf->hw; +	u32 val; +	int i; -		i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_dropped_link_down, -				   &nsd->tx_dropped_link_down); +	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), +			   I40E_GLPRT_GORCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes); +	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), +			   I40E_GLPRT_GOTCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes); +	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.rx_discards, +			   &nsd->eth.rx_discards); +	i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.tx_discards, +			   &nsd->eth.tx_discards); -		i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->crc_errors, &nsd->crc_errors); -		ns->rx_crc_errors = nsd->crc_errors; +	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), +			   I40E_GLPRT_UPRCL(hw->port), +			  
 pf->stat_offsets_loaded, +			   &osd->eth.rx_unicast, +			   &nsd->eth.rx_unicast); +	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), +			   I40E_GLPRT_MPRCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.rx_multicast, +			   &nsd->eth.rx_multicast); +	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), +			   I40E_GLPRT_BPRCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.rx_broadcast, +			   &nsd->eth.rx_broadcast); +	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), +			   I40E_GLPRT_UPTCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.tx_unicast, +			   &nsd->eth.tx_unicast); +	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), +			   I40E_GLPRT_MPTCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.tx_multicast, +			   &nsd->eth.tx_multicast); +	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), +			   I40E_GLPRT_BPTCL(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->eth.tx_broadcast, +			   &nsd->eth.tx_broadcast); -		i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->illegal_bytes, &nsd->illegal_bytes); -		ns->rx_errors = nsd->crc_errors -				+ nsd->illegal_bytes; +	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_dropped_link_down, +			   &nsd->tx_dropped_link_down); -		i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->mac_local_faults, -				   &nsd->mac_local_faults); -		i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->mac_remote_faults, -				   &nsd->mac_remote_faults); +	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->crc_errors, &nsd->crc_errors); -		i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_length_errors, -				   &nsd->rx_length_errors); -		ns->rx_length_errors = nsd->rx_length_errors; +	
i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->illegal_bytes, &nsd->illegal_bytes); -		i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->link_xon_rx, &nsd->link_xon_rx); -		i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->link_xon_tx, &nsd->link_xon_tx); -		i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */ -		i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->link_xoff_tx, &nsd->link_xoff_tx); - -		for (i = 0; i < 8; i++) { -			i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), -					   pf->stat_offsets_loaded, -					   &osd->priority_xon_rx[i], -					   &nsd->priority_xon_rx[i]); -			i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), -					   pf->stat_offsets_loaded, -					   &osd->priority_xon_tx[i], -					   &nsd->priority_xon_tx[i]); -			i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), -					   pf->stat_offsets_loaded, -					   &osd->priority_xoff_tx[i], -					   &nsd->priority_xoff_tx[i]); -			i40e_stat_update32(hw, -					   I40E_GLPRT_RXON2OFFCNT(hw->port, i), -					   pf->stat_offsets_loaded, -					   &osd->priority_xon_2_xoff[i], -					   &nsd->priority_xon_2_xoff[i]); -		} +	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->mac_local_faults, +			   &nsd->mac_local_faults); +	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->mac_remote_faults, +			   &nsd->mac_remote_faults); -		i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), -				   I40E_GLPRT_PRC64L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_64, &nsd->rx_size_64); -		i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), -				   I40E_GLPRT_PRC127L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_127, &nsd->rx_size_127); -		i40e_stat_update48(hw, 
I40E_GLPRT_PRC255H(hw->port), -				   I40E_GLPRT_PRC255L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_255, &nsd->rx_size_255); -		i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), -				   I40E_GLPRT_PRC511L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_511, &nsd->rx_size_511); -		i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), -				   I40E_GLPRT_PRC1023L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_1023, &nsd->rx_size_1023); -		i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), -				   I40E_GLPRT_PRC1522L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_1522, &nsd->rx_size_1522); -		i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), -				   I40E_GLPRT_PRC9522L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->rx_size_big, &nsd->rx_size_big); +	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_length_errors, +			   &nsd->rx_length_errors); -		i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), -				   I40E_GLPRT_PTC64L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_size_64, &nsd->tx_size_64); -		i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), -				   I40E_GLPRT_PTC127L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_size_127, &nsd->tx_size_127); -		i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), -				   I40E_GLPRT_PTC255L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_size_255, &nsd->tx_size_255); -		i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), -				   I40E_GLPRT_PTC511L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_size_511, &nsd->tx_size_511); -		i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), -				   I40E_GLPRT_PTC1023L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_size_1023, &nsd->tx_size_1023); -		i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), -				   I40E_GLPRT_PTC1522L(hw->port), -				   pf->stat_offsets_loaded, 
-				   &osd->tx_size_1522, &nsd->tx_size_1522); -		i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), -				   I40E_GLPRT_PTC9522L(hw->port), -				   pf->stat_offsets_loaded, -				   &osd->tx_size_big, &nsd->tx_size_big); +	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->link_xon_rx, &nsd->link_xon_rx); +	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->link_xon_tx, &nsd->link_xon_tx); +	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */ +	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->link_xoff_tx, &nsd->link_xoff_tx); -		i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), +	for (i = 0; i < 8; i++) { +		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),  				   pf->stat_offsets_loaded, -				   &osd->rx_undersize, &nsd->rx_undersize); -		i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), +				   &osd->priority_xon_rx[i], +				   &nsd->priority_xon_rx[i]); +		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),  				   pf->stat_offsets_loaded, -				   &osd->rx_fragments, &nsd->rx_fragments); -		i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), +				   &osd->priority_xon_tx[i], +				   &nsd->priority_xon_tx[i]); +		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),  				   pf->stat_offsets_loaded, -				   &osd->rx_oversize, &nsd->rx_oversize); -		i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), +				   &osd->priority_xoff_tx[i], +				   &nsd->priority_xoff_tx[i]); +		i40e_stat_update32(hw, +				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),  				   pf->stat_offsets_loaded, -				   &osd->rx_jabber, &nsd->rx_jabber); +				   &osd->priority_xon_2_xoff[i], +				   &nsd->priority_xon_2_xoff[i]);  	} +	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), +			   I40E_GLPRT_PRC64L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_64, &nsd->rx_size_64); +	i40e_stat_update48(hw, 
I40E_GLPRT_PRC127H(hw->port), +			   I40E_GLPRT_PRC127L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_127, &nsd->rx_size_127); +	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), +			   I40E_GLPRT_PRC255L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_255, &nsd->rx_size_255); +	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), +			   I40E_GLPRT_PRC511L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_511, &nsd->rx_size_511); +	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), +			   I40E_GLPRT_PRC1023L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_1023, &nsd->rx_size_1023); +	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), +			   I40E_GLPRT_PRC1522L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_1522, &nsd->rx_size_1522); +	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), +			   I40E_GLPRT_PRC9522L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_size_big, &nsd->rx_size_big); + +	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), +			   I40E_GLPRT_PTC64L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_size_64, &nsd->tx_size_64); +	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), +			   I40E_GLPRT_PTC127L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_size_127, &nsd->tx_size_127); +	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), +			   I40E_GLPRT_PTC255L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_size_255, &nsd->tx_size_255); +	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), +			   I40E_GLPRT_PTC511L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_size_511, &nsd->tx_size_511); +	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), +			   I40E_GLPRT_PTC1023L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_size_1023, &nsd->tx_size_1023); +	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), +			   I40E_GLPRT_PTC1522L(hw->port), +			   pf->stat_offsets_loaded, +			   
&osd->tx_size_1522, &nsd->tx_size_1522); +	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), +			   I40E_GLPRT_PTC9522L(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->tx_size_big, &nsd->tx_size_big); + +	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_undersize, &nsd->rx_undersize); +	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_fragments, &nsd->rx_fragments); +	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_oversize, &nsd->rx_oversize); +	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), +			   pf->stat_offsets_loaded, +			   &osd->rx_jabber, &nsd->rx_jabber); + +	/* FDIR stats */ +	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), +			   pf->stat_offsets_loaded, +			   &osd->fd_atr_match, &nsd->fd_atr_match); +	i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), +			   pf->stat_offsets_loaded, +			   &osd->fd_sb_match, &nsd->fd_sb_match); + +	val = rd32(hw, I40E_PRTPM_EEE_STAT); +	nsd->tx_lpi_status = +		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >> +			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT; +	nsd->rx_lpi_status = +		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >> +			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT; +	i40e_stat_update32(hw, I40E_PRTPM_TLPIC, +			   pf->stat_offsets_loaded, +			   &osd->tx_lpi_count, &nsd->tx_lpi_count); +	i40e_stat_update32(hw, I40E_PRTPM_RLPIC, +			   pf->stat_offsets_loaded, +			   &osd->rx_lpi_count, &nsd->rx_lpi_count); +  	pf->stat_offsets_loaded = true;  }  /** + * i40e_update_stats - Update the various statistics counters. + * @vsi: the VSI to be updated + * + * Update the various stats for this VSI and its related entities. 
+ **/ +void i40e_update_stats(struct i40e_vsi *vsi) +{ +	struct i40e_pf *pf = vsi->back; + +	if (vsi == pf->vsi[pf->lan_vsi]) +		i40e_update_pf_stats(pf); + +	i40e_update_vsi_stats(vsi); +} + +/**   * i40e_find_filter - Search VSI filter list for specific mac/vlan filter   * @vsi: the VSI to be searched   * @macaddr: the MAC address @@ -1002,7 +1152,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,  		if (!i40e_find_filter(vsi, macaddr, f->vlan,  				      is_vf, is_netdev)) {  			if (!i40e_add_filter(vsi, macaddr, f->vlan, -						is_vf, is_netdev)) +					     is_vf, is_netdev))  				return NULL;  		}  	} @@ -1012,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,  }  /** + * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM + * @vsi: the PF Main VSI - inappropriate for any other VSI + * @macaddr: the MAC address + **/ +static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +{ +	struct i40e_aqc_remove_macvlan_element_data element; +	struct i40e_pf *pf = vsi->back; +	i40e_status aq_ret; + +	/* Only appropriate for the PF main VSI */ +	if (vsi->type != I40E_VSI_MAIN) +		return; + +	ether_addr_copy(element.mac_addr, macaddr); +	element.vlan_tag = 0; +	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | +			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; +	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); +	if (aq_ret) +		dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n"); +} + +/**   * i40e_add_filter - Add a mac/vlan filter to the VSI   * @vsi: the VSI to be searched   * @macaddr: the MAC address @@ -1036,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,  		if (!f)  			goto add_filter_out; -		memcpy(f->macaddr, macaddr, ETH_ALEN); +		ether_addr_copy(f->macaddr, macaddr);  		f->vlan = vlan;  		f->changed = true; @@ -1144,6 +1318,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  	if 
(ether_addr_equal(netdev->dev_addr, addr->sa_data))  		return 0; +	if (test_bit(__I40E_DOWN, &vsi->back->state) || +	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) +		return -EADDRNOTAVAIL; +  	if (vsi->type == I40E_VSI_MAIN) {  		i40e_status ret;  		ret = i40e_aq_mac_address_write(&vsi->back->hw, @@ -1156,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  			return -EADDRNOTAVAIL;  		} -		memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len); +		ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);  	}  	/* In order to be sure to not drop any packets, add the new address @@ -1170,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)  	i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);  	i40e_sync_vsi_filters(vsi); -	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); +	ether_addr_copy(netdev->dev_addr, addr->sa_data);  	return 0;  } @@ -1197,6 +1375,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,  	u8 offset;  	u16 qmap;  	int i; +	u16 num_tc_qps = 0;  	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;  	offset = 0; @@ -1218,6 +1397,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,  	vsi->tc_config.numtc = numtc;  	vsi->tc_config.enabled_tc = enabled_tc ? 
enabled_tc : 1; +	/* Number of queues per enabled TC */ +	num_tc_qps = vsi->alloc_queue_pairs/numtc; +	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);  	/* Setup queue offset/count for all TCs for given VSI */  	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -1225,30 +1407,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,  		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */  			int pow, num_qps; -			vsi->tc_config.tc_info[i].qoffset = offset;  			switch (vsi->type) {  			case I40E_VSI_MAIN: -				if (i == 0) -					qcount = pf->rss_size; -				else -					qcount = pf->num_tc_qps; -				vsi->tc_config.tc_info[i].qcount = qcount; +				qcount = min_t(int, pf->rss_size, num_tc_qps);  				break;  			case I40E_VSI_FDIR:  			case I40E_VSI_SRIOV:  			case I40E_VSI_VMDQ2:  			default: -				qcount = vsi->alloc_queue_pairs; -				vsi->tc_config.tc_info[i].qcount = qcount; +				qcount = num_tc_qps;  				WARN_ON(i != 0);  				break;  			} +			vsi->tc_config.tc_info[i].qoffset = offset; +			vsi->tc_config.tc_info[i].qcount = qcount;  			/* find the power-of-2 of the number of queue pairs */ -			num_qps = vsi->tc_config.tc_info[i].qcount; +			num_qps = qcount;  			pow = 0; -			while (num_qps && -			      ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) { +			while (num_qps && ((1 << pow) < qcount)) {  				pow++;  				num_qps >>= 1;  			} @@ -1258,7 +1435,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,  			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |  			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); -			offset += vsi->tc_config.tc_info[i].qcount; +			offset += qcount;  		} else {  			/* TC is not enabled so set the offset to  			 * default queue and allocate one queue @@ -1428,17 +1605,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			cmd_flags = 0;  			/* add to delete list */ -			memcpy(del_list[num_del].mac_addr, -			       f->macaddr, ETH_ALEN); +			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);  			
del_list[num_del].vlan_tag =  				cpu_to_le16((u16)(f->vlan ==  					    I40E_VLAN_ANY ? 0 : f->vlan)); -			/* vlan0 as wild card to allow packets from all vlans */ -			if (f->vlan == I40E_VLAN_ANY || -			    (vsi->netdev && !(vsi->netdev->features & -					      NETIF_F_HW_VLAN_CTAG_FILTER))) -				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;  			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;  			del_list[num_del].flags = cmd_flags;  			num_del++; @@ -1455,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  				num_del = 0;  				memset(del_list, 0, sizeof(*del_list)); -				if (aq_ret) +				if (aq_ret && +				    pf->hw.aq.asq_last_status != +							      I40E_AQ_RC_ENOENT)  					dev_info(&pf->pdev->dev,  						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",  						 aq_ret, @@ -1467,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  						     del_list, num_del, NULL);  			num_del = 0; -			if (aq_ret) +			if (aq_ret && +			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)  				dev_info(&pf->pdev->dev,  					 "ignoring delete macvlan error, err %d, aq_err %d\n",  					 aq_ret, pf->hw.aq.asq_last_status); @@ -1496,20 +1670,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			cmd_flags = 0;  			/* add to add array */ -			memcpy(add_list[num_add].mac_addr, -			       f->macaddr, ETH_ALEN); +			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);  			add_list[num_add].vlan_tag =  				cpu_to_le16(  				 (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan));  			add_list[num_add].queue_number = 0;  			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; - -			/* vlan0 as wild card to allow packets from all vlans */ -			if (f->vlan == I40E_VLAN_ANY || (vsi->netdev && -			    !(vsi->netdev->features & -						 NETIF_F_HW_VLAN_CTAG_FILTER))) -				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;  			add_list[num_add].flags = cpu_to_le16(cmd_flags);  			num_add++; @@ -1575,6 +1742,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)  			dev_info(&pf->pdev->dev,  				 "set uni promisc failed, err %d, aq_err %d\n",  				 aq_ret, pf->hw.aq.asq_last_status); +		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, +						   vsi->seid, +						   cur_promisc, NULL); +		if (aq_ret) +			dev_info(&pf->pdev->dev, +				 "set brdcast promisc failed, err %d, aq_err %d\n", +				 aq_ret, pf->hw.aq.asq_last_status);  	}  	clear_bit(__I40E_CONFIG_BUSY, &vsi->state); @@ -1593,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)  		return;  	pf->flags &= ~I40E_FLAG_FILTER_SYNC; -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		if (pf->vsi[v] &&  		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))  			i40e_sync_vsi_filters(pf->vsi[v]); @@ -1610,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)  static int i40e_change_mtu(struct net_device *netdev, int new_mtu)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev); -	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; +	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;  	struct i40e_vsi *vsi = np->vsi;  	/* MTU < 68 is an error and causes problems on some kernels */ @@ -1627,6 +1801,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)  }  /** + * i40e_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + **/ +int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int 
cmd) +{ +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; + +	switch (cmd) { +	case SIOCGHWTSTAMP: +		return i40e_ptp_get_ts_config(pf, ifr); +	case SIOCSHWTSTAMP: +		return i40e_ptp_set_ts_config(pf, ifr); +	default: +		return -EOPNOTSUPP; +	} +} + +/**   * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI   * @vsi: the vsi being adjusted   **/ @@ -1708,7 +1903,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  {  	struct i40e_mac_filter *f, *add_f;  	bool is_netdev, is_vf; -	int ret;  	is_vf = (vsi->type == I40E_VSI_SRIOV);  	is_netdev = !!(vsi->netdev); @@ -1734,13 +1928,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  		}  	} -	ret = i40e_sync_vsi_filters(vsi); -	if (ret) { -		dev_info(&vsi->back->pdev->dev, -			 "Could not sync filters for vid %d\n", vid); -		return ret; -	} -  	/* Now if we add a vlan tag, make sure to check if it is the first  	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"  	 * with 0, so we now accept untagged and specified tagged traffic @@ -1761,7 +1948,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  				return -ENOMEM;  			}  		} +	} +	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ +	if (vid > 0 && !vsi->info.pvid) {  		list_for_each_entry(f, &vsi->mac_filter_list, list) {  			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,  					     is_vf, is_netdev)) { @@ -1777,10 +1967,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)  				}  			}  		} -		ret = i40e_sync_vsi_filters(vsi);  	} -	return ret; +	if (test_bit(__I40E_DOWN, &vsi->back->state) || +	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) +		return 0; + +	return i40e_sync_vsi_filters(vsi);  }  /** @@ -1796,7 +1989,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)  	struct i40e_mac_filter *f, *add_f;  	bool is_vf, is_netdev;  	int filter_count = 0; -	int ret;  	is_vf = (vsi->type == I40E_VSI_SRIOV);  	is_netdev = !!(netdev); 
@@ -1807,12 +1999,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)  	list_for_each_entry(f, &vsi->mac_filter_list, list)  		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); -	ret = i40e_sync_vsi_filters(vsi); -	if (ret) { -		dev_info(&vsi->back->pdev->dev, "Could not sync filters\n"); -		return ret; -	} -  	/* go through all the filters for this VSI and if there is only  	 * vid == 0 it means there are no other filters, so vid 0 must  	 * be replaced with -1. This signifies that we should from now @@ -1855,6 +2041,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)  		}  	} +	if (test_bit(__I40E_DOWN, &vsi->back->state) || +	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) +		return 0; +  	return i40e_sync_vsi_filters(vsi);  } @@ -1877,11 +2067,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,  	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); -	/* If the network stack called us with vid = 0, we should -	 * indicate to i40e_vsi_add_vlan() that we want to receive -	 * any traffic (i.e. with any vlan tag, or untagged) +	/* If the network stack called us with vid = 0 then +	 * it is asking to receive priority tagged packets with +	 * vlan id 0.  Our HW receives them by default when configured +	 * to receive untagged packets so there is no need to add an +	 * extra filter for vlan 0 tagged packets.  	 */ -	ret = i40e_vsi_add_vlan(vsi, vid ? 
vid : I40E_VLAN_ANY); +	if (vid) +		ret = i40e_vsi_add_vlan(vsi, vid);  	if (!ret && (vid < VLAN_N_VID))  		set_bit(vid, vsi->active_vlans); @@ -1894,7 +2087,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,   * @netdev: network interface to be adjusted   * @vid: vlan id to be removed   * - * net_device_ops implementation for adding vlan ids + * net_device_ops implementation for removing vlan ids   **/  static int i40e_vlan_rx_kill_vid(struct net_device *netdev,  				 __always_unused __be16 proto, u16 vid) @@ -1945,8 +2138,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)  	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);  	vsi->info.pvid = cpu_to_le16(vid); -	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID; -	vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED; +	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | +				    I40E_AQ_VSI_PVLAN_INSERT_PVID | +				    I40E_AQ_VSI_PVLAN_EMOD_STR;  	ctxt.seid = vsi->seid;  	memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); @@ -1969,8 +2163,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)   **/  void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)  { +	i40e_vlan_stripping_disable(vsi); +  	vsi->info.pvid = 0; -	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);  }  /** @@ -1988,7 +2183,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)  	int i, err = 0;  	for (i = 0; i < vsi->num_queue_pairs && !err; i++) -		err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]); +		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);  	return err;  } @@ -2003,9 +2198,12 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)  {  	int i; +	if (!vsi->tx_rings) +		return; +  	for (i = 0; i < vsi->num_queue_pairs; i++) -		if (vsi->tx_rings[i].desc) -			i40e_free_tx_resources(&vsi->tx_rings[i]); +		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) +			i40e_free_tx_resources(vsi->tx_rings[i]);  }  /** @@ -2023,7 +2221,7 @@ static int 
i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)  	int i, err = 0;  	for (i = 0; i < vsi->num_queue_pairs && !err; i++) -		err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]); +		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);  	return err;  } @@ -2037,9 +2235,12 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)  {  	int i; +	if (!vsi->rx_rings) +		return; +  	for (i = 0; i < vsi->num_queue_pairs; i++) -		if (vsi->rx_rings[i].desc) -			i40e_free_rx_resources(&vsi->rx_rings[i]); +		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) +			i40e_free_rx_resources(vsi->rx_rings[i]);  }  /** @@ -2058,7 +2259,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)  	u32 qtx_ctl = 0;  	/* some ATR related tx ring init */ -	if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) { +	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {  		ring->atr_sample_rate = vsi->back->atr_sample_rate;  		ring->atr_count = 0;  	} else { @@ -2067,6 +2268,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)  	/* initialize XPS */  	if (ring->q_vector && ring->netdev && +	    vsi->tc_config.numtc <= 1 &&  	    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))  		netif_set_xps_queue(ring->netdev,  				    &ring->q_vector->affinity_mask, @@ -2078,8 +2280,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)  	tx_ctx.new_context = 1;  	tx_ctx.base = (ring->dma / 128);  	tx_ctx.qlen = ring->count; -	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED | -			I40E_FLAG_FDIR_ATR_ENABLED)); +	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | +					       I40E_FLAG_FD_ATR_ENABLED)); +	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); +	/* FDIR VSI tx ring can still use RS bit and writebacks */ +	if (vsi->type != I40E_VSI_FDIR) +		tx_ctx.head_wb_ena = 1; +	tx_ctx.head_wb_addr = ring->dma + +			      (ring->count * sizeof(struct i40e_tx_desc));  	/* As part of VSI creation/update, FW allocates certain  	 * Tx arbitration queue 
sets for each TC enabled for @@ -2113,9 +2321,12 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)  	}  	/* Now associate this queue with this PCI function */ -	qtx_ctl = I40E_QTX_CTL_PF_QUEUE; -	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) -						& I40E_QTX_CTL_PF_INDX_MASK); +	if (vsi->type == I40E_VSI_VMDQ2) +		qtx_ctl = I40E_QTX_CTL_VM_QUEUE; +	else +		qtx_ctl = I40E_QTX_CTL_PF_QUEUE; +	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & +		    I40E_QTX_CTL_PF_INDX_MASK);  	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);  	i40e_flush(hw); @@ -2180,10 +2391,15 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)  	rx_ctx.tphwdesc_ena = 1;  	rx_ctx.tphdata_ena = 1;  	rx_ctx.tphhead_ena = 1; -	rx_ctx.lrxqthresh = 2; +	if (hw->revision_id == 0) +		rx_ctx.lrxqthresh = 0; +	else +		rx_ctx.lrxqthresh = 2;  	rx_ctx.crcstrip = 1;  	rx_ctx.l2tsel = 1;  	rx_ctx.showiv = 1; +	/* set the prefena field to 1 because the manual says to */ +	rx_ctx.prefena = 1;  	/* clear the context in the HMC */  	err = i40e_clear_lan_rx_queue_context(hw, pf_q); @@ -2223,8 +2439,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)  	int err = 0;  	u16 i; -	for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++) -		err = i40e_configure_tx_ring(&vsi->tx_rings[i]); +	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) +		err = i40e_configure_tx_ring(vsi->tx_rings[i]);  	return err;  } @@ -2274,7 +2490,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)  	/* set up individual rings */  	for (i = 0; i < vsi->num_queue_pairs && !err; i++) -		err = i40e_configure_rx_ring(&vsi->rx_rings[i]); +		err = i40e_configure_rx_ring(vsi->rx_rings[i]);  	return err;  } @@ -2285,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)   **/  static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)  { +	struct i40e_ring *tx_ring, *rx_ring;  	u16 qoffset, qcount;  	int i, n; @@ -2298,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) 
 		qoffset = vsi->tc_config.tc_info[n].qoffset;  		qcount = vsi->tc_config.tc_info[n].qcount;  		for (i = qoffset; i < (qoffset + qcount); i++) { -			struct i40e_ring *rx_ring = &vsi->rx_rings[i]; -			struct i40e_ring *tx_ring = &vsi->tx_rings[i]; +			rx_ring = vsi->rx_rings[i]; +			tx_ring = vsi->tx_rings[i];  			rx_ring->dcb_tc = n;  			tx_ring->dcb_tc = n;  		} @@ -2317,6 +2534,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)  }  /** + * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters + * @vsi: Pointer to the targeted VSI + * + * This function replays the hlist on the hw where all the SB Flow Director + * filters were saved. + **/ +static void i40e_fdir_filter_restore(struct i40e_vsi *vsi) +{ +	struct i40e_fdir_filter *filter; +	struct i40e_pf *pf = vsi->back; +	struct hlist_node *node; + +	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) +		return; + +	hlist_for_each_entry_safe(filter, node, +				  &pf->fdir_filter_list, fdir_node) { +		i40e_add_del_fdir(vsi, filter, true); +	} +} + +/**   * i40e_vsi_configure - Set up the VSI for action   * @vsi: the VSI being configured   **/ @@ -2354,8 +2593,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)  	 */  	qp = vsi->base_queue;  	vector = vsi->base_vector; -	q_vector = vsi->q_vectors; -	for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) { +	for (i = 0; i < vsi->num_q_vectors; i++, vector++) { +		q_vector = vsi->q_vectors[i];  		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);  		q_vector->rx.latency_range = I40E_LOW_LATENCY;  		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), @@ -2414,7 +2653,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)  	      I40E_PFINT_ICR0_ENA_GRST_MASK          |  	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |  	      I40E_PFINT_ICR0_ENA_GPIO_MASK          | -	      I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK  | +	      I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |  	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |  	      
I40E_PFINT_ICR0_ENA_VFLR_MASK          |  	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK; @@ -2422,8 +2661,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)  	wr32(hw, I40E_PFINT_ICR0_ENA, val);  	/* SW_ITR_IDX = 0, but don't change INTENA */ -	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK | -					I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK); +	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | +					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);  	/* OTHER_ITR_IDX = 0 */  	wr32(hw, I40E_PFINT_STAT_CTL0, 0); @@ -2435,7 +2674,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)   **/  static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)  { -	struct i40e_q_vector *q_vector = vsi->q_vectors; +	struct i40e_q_vector *q_vector = vsi->q_vectors[0];  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw;  	u32 val; @@ -2453,7 +2692,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)  	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */  	wr32(hw, I40E_PFINT_LNKLST0, 0); -	/* Associate the queue pair to the vector and enable the q int */ +	/* Associate the queue pair to the vector and enable the queue int */  	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		      |  	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |  	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); @@ -2469,10 +2708,23 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)  }  /** + * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0 + * @pf: board private structure + **/ +void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) +{ +	struct i40e_hw *hw = &pf->hw; + +	wr32(hw, I40E_PFINT_DYN_CTL0, +	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); +	i40e_flush(hw); +} + +/**   * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0   * @pf: board private structure   **/ -static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +void 
i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)  {  	struct i40e_hw *hw = &pf->hw;  	u32 val; @@ -2500,7 +2752,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)  	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |  	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);  	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); -	i40e_flush(hw); +	/* skip the flush */  }  /** @@ -2512,7 +2764,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)  {  	struct i40e_q_vector *q_vector = data; -	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) +	if (!q_vector->tx.ring && !q_vector->rx.ring)  		return IRQ_HANDLED;  	napi_schedule(&q_vector->napi); @@ -2521,23 +2773,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)  }  /** - * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings - * @irq: interrupt number - * @data: pointer to a q_vector - **/ -static irqreturn_t i40e_fdir_clean_rings(int irq, void *data) -{ -	struct i40e_q_vector *q_vector = data; - -	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0]) -		return IRQ_HANDLED; - -	pr_info("fdir ring cleaning needed\n"); - -	return IRQ_HANDLED; -} - -/**   * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts   * @vsi: the VSI being configured   * @basename: name for the vector @@ -2554,16 +2789,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)  	int vector, err;  	for (vector = 0; vector < q_vectors; vector++) { -		struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]); +		struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; -		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) { +		if (q_vector->tx.ring && q_vector->rx.ring) {  			snprintf(q_vector->name, sizeof(q_vector->name) - 1,  				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);  			tx_int_idx++; -		} else if (q_vector->rx.ring[0]) { +		} else if (q_vector->rx.ring) {  			snprintf(q_vector->name, sizeof(q_vector->name) - 1,  				 "%s-%s-%d", basename, "rx", rx_int_idx++); -		} else 
if (q_vector->tx.ring[0]) { +		} else if (q_vector->tx.ring) {  			snprintf(q_vector->name, sizeof(q_vector->name) - 1,  				 "%s-%s-%d", basename, "tx", tx_int_idx++);  		} else { @@ -2586,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)  				      &q_vector->affinity_mask);  	} +	vsi->irqs_ready = true;  	return 0;  free_queue_irqs: @@ -2611,8 +2847,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)  	int i;  	for (i = 0; i < vsi->num_queue_pairs; i++) { -		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0); -		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0); +		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0); +		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);  	}  	if (pf->flags & I40E_FLAG_MSIX_ENABLED) { @@ -2649,6 +2885,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)  		i40e_irq_dynamic_enable_icr0(pf);  	} +	i40e_flush(&pf->hw);  	return 0;  } @@ -2676,20 +2913,21 @@ static irqreturn_t i40e_intr(int irq, void *data)  {  	struct i40e_pf *pf = (struct i40e_pf *)data;  	struct i40e_hw *hw = &pf->hw; +	irqreturn_t ret = IRQ_NONE;  	u32 icr0, icr0_remaining;  	u32 val, ena_mask;  	icr0 = rd32(hw, I40E_PFINT_ICR0); +	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);  	/* if sharing a legacy IRQ, we might get called w/o an intr pending */  	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0) -		return IRQ_NONE; +		goto enable_intr; -	val = rd32(hw, I40E_PFINT_DYN_CTL0); -	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; -	wr32(hw, I40E_PFINT_DYN_CTL0, val); - -	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); +	/* if interrupt but no bits showing, must be SWINT */ +	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) || +	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) +		pf->sw_int_count++;  	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */  	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { @@ -2702,10 +2940,9 @@ static irqreturn_t i40e_intr(int irq, void *data)  		qval = rd32(hw, 
I40E_QINT_TQCTL(0));  		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;  		wr32(hw, I40E_QINT_TQCTL(0), qval); -		i40e_flush(hw);  		if (!test_bit(__I40E_DOWN, &pf->state)) -			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi); +			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);  	}  	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { @@ -2730,12 +2967,28 @@ static irqreturn_t i40e_intr(int irq, void *data)  		val = rd32(hw, I40E_GLGEN_RSTAT);  		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)  		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; -		if (val & I40E_RESET_CORER) +		if (val == I40E_RESET_CORER) {  			pf->corer_count++; -		else if (val & I40E_RESET_GLOBR) +		} else if (val == I40E_RESET_GLOBR) {  			pf->globr_count++; -		else if (val & I40E_RESET_EMPR) +		} else if (val == I40E_RESET_EMPR) {  			pf->empr_count++; +			set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); +		} +	} + +	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { +		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; +		dev_info(&pf->pdev->dev, "HMC error interrupt\n"); +	} + +	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { +		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); + +		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { +			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; +			i40e_ptp_tx_hwtstamp(pf); +		}  	}  	/* If a critical error is pending we have no choice but to reset the @@ -2746,68 +2999,137 @@ static irqreturn_t i40e_intr(int irq, void *data)  	if (icr0_remaining) {  		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",  			 icr0_remaining); -		if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) || -		    (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || +		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||  		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || -		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || -		    (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) { -			if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { -				dev_info(&pf->pdev->dev, "HMC error 
interrupt\n"); -			} else { -				dev_info(&pf->pdev->dev, "device will be reset\n"); -				set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); -				i40e_service_event_schedule(pf); -			} +		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) { +			dev_info(&pf->pdev->dev, "device will be reset\n"); +			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); +			i40e_service_event_schedule(pf);  		}  		ena_mask &= ~icr0_remaining;  	} +	ret = IRQ_HANDLED; +enable_intr:  	/* re-enable interrupt causes */  	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); -	i40e_flush(hw);  	if (!test_bit(__I40E_DOWN, &pf->state)) {  		i40e_service_event_schedule(pf);  		i40e_irq_dynamic_enable_icr0(pf);  	} -	return IRQ_HANDLED; +	return ret;  }  /** - * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector - * @vsi: the VSI being configured - * @v_idx: vector index - * @r_idx: rx queue index + * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes + * @tx_ring:  tx ring to clean + * @budget:   how many cleans we're allowed + * + * Returns true if there's any budget left (e.g. 
the clean is finished)   **/ -static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx) +static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)  { -	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); -	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]); +	struct i40e_vsi *vsi = tx_ring->vsi; +	u16 i = tx_ring->next_to_clean; +	struct i40e_tx_buffer *tx_buf; +	struct i40e_tx_desc *tx_desc; -	rx_ring->q_vector = q_vector; -	q_vector->rx.ring[q_vector->rx.count] = rx_ring; -	q_vector->rx.count++; -	q_vector->rx.latency_range = I40E_LOW_LATENCY; -	q_vector->vsi = vsi; +	tx_buf = &tx_ring->tx_bi[i]; +	tx_desc = I40E_TX_DESC(tx_ring, i); +	i -= tx_ring->count; + +	do { +		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; + +		/* if next_to_watch is not set then there is no work pending */ +		if (!eop_desc) +			break; + +		/* prevent any other reads prior to eop_desc */ +		read_barrier_depends(); + +		/* if the descriptor isn't done, no work yet to do */ +		if (!(eop_desc->cmd_type_offset_bsz & +		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) +			break; + +		/* clear next_to_watch to prevent false hangs */ +		tx_buf->next_to_watch = NULL; + +		/* unmap skb header data */ +		dma_unmap_single(tx_ring->dev, +				 dma_unmap_addr(tx_buf, dma), +				 dma_unmap_len(tx_buf, len), +				 DMA_TO_DEVICE); + +		dma_unmap_len_set(tx_buf, len, 0); + + +		/* move to the next desc and buffer to clean */ +		tx_buf++; +		tx_desc++; +		i++; +		if (unlikely(!i)) { +			i -= tx_ring->count; +			tx_buf = tx_ring->tx_bi; +			tx_desc = I40E_TX_DESC(tx_ring, 0); +		} + +		/* update budget accounting */ +		budget--; +	} while (likely(budget)); + +	i += tx_ring->count; +	tx_ring->next_to_clean = i; + +	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { +		i40e_irq_dynamic_enable(vsi, +				tx_ring->q_vector->v_idx + vsi->base_vector); +	} +	return budget > 0; +} + +/** + * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring + * @irq: interrupt 
number + * @data: pointer to a q_vector + **/ +static irqreturn_t i40e_fdir_clean_ring(int irq, void *data) +{ +	struct i40e_q_vector *q_vector = data; +	struct i40e_vsi *vsi; + +	if (!q_vector->tx.ring) +		return IRQ_HANDLED; + +	vsi = q_vector->tx.ring->vsi; +	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); + +	return IRQ_HANDLED;  }  /** - * i40e_map_vector_to_txq - Assigns the Tx queue to the vector + * i40e_map_vector_to_qp - Assigns the queue pair to the vector   * @vsi: the VSI being configured   * @v_idx: vector index - * @t_idx: tx queue index + * @qp_idx: queue pair index   **/ -static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx) +static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)  { -	struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]); -	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]); +	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; +	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; +	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];  	tx_ring->q_vector = q_vector; -	q_vector->tx.ring[q_vector->tx.count] = tx_ring; +	tx_ring->next = q_vector->tx.ring; +	q_vector->tx.ring = tx_ring;  	q_vector->tx.count++; -	q_vector->tx.latency_range = I40E_LOW_LATENCY; -	q_vector->num_ringpairs++; -	q_vector->vsi = vsi; + +	rx_ring->q_vector = q_vector; +	rx_ring->next = q_vector->rx.ring; +	q_vector->rx.ring = rx_ring; +	q_vector->rx.count++;  }  /** @@ -2823,7 +3145,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)  {  	int qp_remaining = vsi->num_queue_pairs;  	int q_vectors = vsi->num_q_vectors; -	int qp_per_vector; +	int num_ringpairs;  	int v_start = 0;  	int qp_idx = 0; @@ -2831,11 +3153,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)  	 * group them so there are multiple queues per vector.  	 
*/  	for (; v_start < q_vectors && qp_remaining; v_start++) { -		qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); -		for (; qp_per_vector; -		     qp_per_vector--, qp_idx++, qp_remaining--)	{ -			map_vector_to_rxq(vsi, v_start, qp_idx); -			map_vector_to_txq(vsi, v_start, qp_idx); +		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; + +		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + +		q_vector->num_ringpairs = num_ringpairs; + +		q_vector->rx.count = 0; +		q_vector->tx.count = 0; +		q_vector->rx.ring = NULL; +		q_vector->tx.ring = NULL; + +		while (num_ringpairs--) { +			map_vector_to_qp(vsi, v_start, qp_idx); +			qp_idx++; +			qp_remaining--;  		}  	}  } @@ -2887,7 +3219,7 @@ static void i40e_netpoll(struct net_device *netdev)  	pf->flags |= I40E_FLAG_IN_NETPOLL;  	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {  		for (i = 0; i < vsi->num_q_vectors; i++) -			i40e_msix_clean_rings(0, &vsi->q_vectors[i]); +			i40e_msix_clean_rings(0, vsi->q_vectors[i]);  	} else {  		i40e_intr(pf->pdev->irq, netdev);  	} @@ -2909,48 +3241,38 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)  	pf_q = vsi->base_queue;  	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { -		j = 1000; -		do { -			usleep_range(1000, 2000); -			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); -		} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) -			       ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); -		if (enable) { -			/* is STAT set ? */ -			if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { -				dev_info(&pf->pdev->dev, -					 "Tx %d already enabled\n", i); -				continue; -			} -		} else { -			/* is !STAT set ? 
*/ -			if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) { -				dev_info(&pf->pdev->dev, -					 "Tx %d already disabled\n", i); -				continue; -			} +		/* warn the TX unit of coming changes */ +		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); +		if (!enable) +			udelay(10); + +		for (j = 0; j < 50; j++) { +			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); +			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) == +			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1)) +				break; +			usleep_range(1000, 2000);  		} +		/* Skip if the queue is already in the requested state */ +		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) +			continue;  		/* turn on/off the queue */ -		if (enable) -			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | -				  I40E_QTX_ENA_QENA_STAT_MASK; -		else +		if (enable) { +			wr32(hw, I40E_QTX_HEAD(pf_q), 0); +			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK; +		} else {  			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; +		}  		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);  		/* wait for the change to finish */  		for (j = 0; j < 10; j++) {  			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); -			if (enable) { -				if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) -					break; -			} else { -				if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) -					break; -			} +			if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) +				break;  			udelay(10);  		} @@ -2961,6 +3283,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)  		}  	} +	if (hw->revision_id == 0) +		mdelay(50); +  	return 0;  } @@ -2978,43 +3303,31 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)  	pf_q = vsi->base_queue;  	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { -		j = 1000; -		do { -			usleep_range(1000, 2000); +		for (j = 0; j < 50; j++) {  			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); -		} while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) -			       ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); - -		if (enable) { -			/* is STAT set ? 
*/ -			if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) -				continue; -		} else { -			/* is !STAT set ? */ -			if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) -				continue; +			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) == +			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1)) +				break; +			usleep_range(1000, 2000);  		} +		/* Skip if the queue is already in the requested state */ +		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) +			continue; +  		/* turn on/off the queue */  		if (enable) -			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | -				  I40E_QRX_ENA_QENA_STAT_MASK; +			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;  		else -			rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | -				  I40E_QRX_ENA_QENA_STAT_MASK); +			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;  		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);  		/* wait for the change to finish */  		for (j = 0; j < 10; j++) {  			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); -			if (enable) { -				if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) -					break; -			} else { -				if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) -					break; -			} +			if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) +				break;  			udelay(10);  		} @@ -3033,9 +3346,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)   * @vsi: the VSI being configured   * @enable: start or stop the rings   **/ -static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) +int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)  { -	int ret; +	int ret = 0;  	/* do rx first for enable and last for disable */  	if (request) { @@ -3044,10 +3357,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)  			return ret;  		ret = i40e_vsi_control_tx(vsi, request);  	} else { -		ret = i40e_vsi_control_tx(vsi, request); -		if (ret) -			return ret; -		ret = i40e_vsi_control_rx(vsi, request); +		/* Ignore return value, we need to shutdown whatever we can */ +		i40e_vsi_control_tx(vsi, request); +		i40e_vsi_control_rx(vsi, request);  	}  	
return ret; @@ -3069,18 +3381,23 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)  		if (!vsi->q_vectors)  			return; +		if (!vsi->irqs_ready) +			return; + +		vsi->irqs_ready = false;  		for (i = 0; i < vsi->num_q_vectors; i++) {  			u16 vector = i + base;  			/* free only the irqs that were actually requested */ -			if (vsi->q_vectors[i].num_ringpairs == 0) +			if (!vsi->q_vectors[i] || +			    !vsi->q_vectors[i]->num_ringpairs)  				continue;  			/* clear the affinity_mask in the IRQ descriptor */  			irq_set_affinity_hint(pf->msix_entries[vector].vector,  					      NULL);  			free_irq(pf->msix_entries[vector].vector, -				 &vsi->q_vectors[i]); +				 vsi->q_vectors[i]);  			/* Tear down the interrupt queue link list  			 * @@ -3164,6 +3481,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)  }  /** + * i40e_free_q_vector - Free memory allocated for specific interrupt vector + * @vsi: the VSI being configured + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector.  In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
+ **/ +static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx) +{ +	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; +	struct i40e_ring *ring; + +	if (!q_vector) +		return; + +	/* disassociate q_vector from rings */ +	i40e_for_each_ring(ring, q_vector->tx) +		ring->q_vector = NULL; + +	i40e_for_each_ring(ring, q_vector->rx) +		ring->q_vector = NULL; + +	/* only VSI w/ an associated netdev is set up w/ NAPI */ +	if (vsi->netdev) +		netif_napi_del(&q_vector->napi); + +	vsi->q_vectors[v_idx] = NULL; + +	kfree_rcu(q_vector, rcu); +} + +/**   * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors   * @vsi: the VSI being un-configured   * @@ -3174,24 +3524,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)  {  	int v_idx; -	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) { -		struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx]; -		int r_idx; - -		if (!q_vector) -			continue; - -		/* disassociate q_vector from rings */ -		for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++) -			q_vector->tx.ring[r_idx]->q_vector = NULL; -		for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++) -			q_vector->rx.ring[r_idx]->q_vector = NULL; - -		/* only VSI w/ an associated netdev is set up w/ NAPI */ -		if (vsi->netdev) -			netif_napi_del(&q_vector->napi); -	} -	kfree(vsi->q_vectors); +	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) +		i40e_free_q_vector(vsi, v_idx);  }  /** @@ -3223,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)  	int i;  	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +	for (i = 0; i < pf->num_alloc_vsi; i++)  		if (pf->vsi[i])  			i40e_vsi_free_q_vectors(pf->vsi[i]);  	i40e_reset_interrupt_capability(pf); @@ -3241,7 +3575,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)  		return;  	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) -		napi_enable(&vsi->q_vectors[q_idx].napi); +		
napi_enable(&vsi->q_vectors[q_idx]->napi);  }  /** @@ -3256,7 +3590,20 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)  		return;  	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) -		napi_disable(&vsi->q_vectors[q_idx].napi); +		napi_disable(&vsi->q_vectors[q_idx]->napi); +} + +/** + * i40e_vsi_close - Shut down a VSI + * @vsi: the vsi to be quelled + **/ +static void i40e_vsi_close(struct i40e_vsi *vsi) +{ +	if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) +		i40e_down(vsi); +	i40e_vsi_free_irq(vsi); +	i40e_vsi_free_tx_resources(vsi); +	i40e_vsi_free_rx_resources(vsi);  }  /** @@ -3272,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)  	if (vsi->netdev && netif_running(vsi->netdev)) {  		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);  	} else { -		set_bit(__I40E_DOWN, &vsi->state); -		i40e_down(vsi); +		i40e_vsi_close(vsi);  	}  } @@ -3290,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)  	if (vsi->netdev && netif_running(vsi->netdev))  		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);  	else -		i40e_up(vsi);   /* this clears the DOWN bit */ +		i40e_vsi_open(vsi);   /* this clears the DOWN bit */  }  /** @@ -3301,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)  {  	int v; -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		if (pf->vsi[v])  			i40e_quiesce_vsi(pf->vsi[v]);  	} @@ -3315,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)  {  	int v; -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		if (pf->vsi[v])  			i40e_unquiesce_vsi(pf->vsi[v]);  	} @@ -3468,7 +3814,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)  	/* Get the VSI level BW configuration per TC */  	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, -					          NULL); +						  NULL);  	if (aq_ret) {  		dev_info(&pf->pdev->dev,  			 "couldn't get pf vsi ets bw config, err %d, 
aq_err %d\n", @@ -3522,8 +3868,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,  					  NULL);  	if (aq_ret) {  		dev_info(&vsi->back->pdev->dev, -			 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", -			 __func__, vsi->back->hw.aq.asq_last_status); +			 "AQ command Config VSI BW allocation per TC failed = %d\n", +			 vsi->back->hw.aq.asq_last_status);  		return -EINVAL;  	} @@ -3679,6 +4025,206 @@ out:  }  /** + * i40e_veb_config_tc - Configure TCs for given VEB + * @veb: given VEB + * @enabled_tc: TC bitmap + * + * Configures given TC bitmap for VEB (switching) element + **/ +int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc) +{ +	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0}; +	struct i40e_pf *pf = veb->pf; +	int ret = 0; +	int i; + +	/* No TCs or already enabled TCs just return */ +	if (!enabled_tc || veb->enabled_tc == enabled_tc) +		return ret; + +	bw_data.tc_valid_bits = enabled_tc; +	/* bw_data.absolute_credits is not set (relative) */ + +	/* Enable ETS TCs with equal BW Share for now */ +	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { +		if (enabled_tc & (1 << i)) +			bw_data.tc_bw_share_credits[i] = 1; +	} + +	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, +						   &bw_data, NULL); +	if (ret) { +		dev_info(&pf->pdev->dev, +			 "veb bw config failed, aq_err=%d\n", +			 pf->hw.aq.asq_last_status); +		goto out; +	} + +	/* Update the BW information */ +	ret = i40e_veb_get_bw_info(veb); +	if (ret) { +		dev_info(&pf->pdev->dev, +			 "Failed getting veb bw config, aq_err=%d\n", +			 pf->hw.aq.asq_last_status); +	} + +out: +	return ret; +} + +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs + * @pf: PF struct + * + * Reconfigure VEB/VSIs on a given PF; it is assumed that + * the caller would've quiesce all the VSIs before calling + * this function + **/ +static void i40e_dcb_reconfigure(struct i40e_pf *pf) +{ +	u8 tc_map = 0; +	int 
ret; +	u8 v; + +	/* Enable the TCs available on PF to all VEBs */ +	tc_map = i40e_pf_get_tc_map(pf); +	for (v = 0; v < I40E_MAX_VEB; v++) { +		if (!pf->veb[v]) +			continue; +		ret = i40e_veb_config_tc(pf->veb[v], tc_map); +		if (ret) { +			dev_info(&pf->pdev->dev, +				 "Failed configuring TC for VEB seid=%d\n", +				 pf->veb[v]->seid); +			/* Will try to configure as many components */ +		} +	} + +	/* Update each VSI */ +	for (v = 0; v < pf->num_alloc_vsi; v++) { +		if (!pf->vsi[v]) +			continue; + +		/* - Enable all TCs for the LAN VSI +		 * - For all others keep them at TC0 for now +		 */ +		if (v == pf->lan_vsi) +			tc_map = i40e_pf_get_tc_map(pf); +		else +			tc_map = i40e_pf_get_default_tc(pf); + +		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); +		if (ret) { +			dev_info(&pf->pdev->dev, +				 "Failed configuring TC for VSI seid=%d\n", +				 pf->vsi[v]->seid); +			/* Will try to configure as many components */ +		} else { +			/* Re-configure VSI vectors based on updated TC map */ +			i40e_vsi_map_rings_to_vectors(pf->vsi[v]); +			if (pf->vsi[v]->netdev) +				i40e_dcbnl_set_all(pf->vsi[v]); +		} +	} +} + +/** + * i40e_init_pf_dcb - Initialize DCB configuration + * @pf: PF being configured + * + * Query the current DCB configuration and cache it + * in the hardware structure + **/ +static int i40e_init_pf_dcb(struct i40e_pf *pf) +{ +	struct i40e_hw *hw = &pf->hw; +	int err = 0; + +	if (pf->hw.func_caps.npar_enable) +		goto out; + +	/* Get the initial DCB configuration */ +	err = i40e_init_dcb(hw); +	if (!err) { +		/* Device/Function is not DCBX capable */ +		if ((!hw->func_caps.dcb) || +		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { +			dev_info(&pf->pdev->dev, +				 "DCBX offload is not supported or is disabled for this PF.\n"); + +			if (pf->flags & I40E_FLAG_MFP_ENABLED) +				goto out; + +		} else { +			/* When status is not DISABLED then DCBX in FW */ +			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | +				       DCB_CAP_DCBX_VER_IEEE; + +			pf->flags 
|= I40E_FLAG_DCB_CAPABLE; +			/* Enable DCB tagging only when more than one TC */ +			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) +				pf->flags |= I40E_FLAG_DCB_ENABLED; +		} +	} else { +		dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", +			 pf->hw.aq.asq_last_status); +	} + +out: +	return err; +} +#endif /* CONFIG_I40E_DCB */ +#define SPEED_SIZE 14 +#define FC_SIZE 8 +/** + * i40e_print_link_message - print link up or down + * @vsi: the VSI for which link needs a message + */ +static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) +{ +	char speed[SPEED_SIZE] = "Unknown"; +	char fc[FC_SIZE] = "RX/TX"; + +	if (!isup) { +		netdev_info(vsi->netdev, "NIC Link is Down\n"); +		return; +	} + +	switch (vsi->back->hw.phy.link_info.link_speed) { +	case I40E_LINK_SPEED_40GB: +		strncpy(speed, "40 Gbps", SPEED_SIZE); +		break; +	case I40E_LINK_SPEED_10GB: +		strncpy(speed, "10 Gbps", SPEED_SIZE); +		break; +	case I40E_LINK_SPEED_1GB: +		strncpy(speed, "1000 Mbps", SPEED_SIZE); +		break; +	default: +		break; +	} + +	switch (vsi->back->hw.fc.current_mode) { +	case I40E_FC_FULL: +		strncpy(fc, "RX/TX", FC_SIZE); +		break; +	case I40E_FC_TX_PAUSE: +		strncpy(fc, "TX", FC_SIZE); +		break; +	case I40E_FC_RX_PAUSE: +		strncpy(fc, "RX", FC_SIZE); +		break; +	default: +		strncpy(fc, "None", FC_SIZE); +		break; +	} + +	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n", +		    speed, fc); +} + +/**   * i40e_up_complete - Finish the last steps of bringing up a connection   * @vsi: the VSI being configured   **/ @@ -3703,9 +4249,16 @@ static int i40e_up_complete(struct i40e_vsi *vsi)  	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&  	    (vsi->netdev)) { +		i40e_print_link_message(vsi, true);  		netif_tx_start_all_queues(vsi->netdev);  		netif_carrier_on(vsi->netdev); +	} else if (vsi->netdev) { +		i40e_print_link_message(vsi, false);  	} + +	/* replay FDIR SB filters */ +	if (vsi->type == I40E_VSI_FDIR) +	
	i40e_fdir_filter_restore(vsi);  	i40e_service_event_schedule(pf);  	return 0; @@ -3772,8 +4325,8 @@ void i40e_down(struct i40e_vsi *vsi)  	i40e_napi_disable_all(vsi);  	for (i = 0; i < vsi->num_queue_pairs; i++) { -		i40e_clean_tx_ring(&vsi->tx_rings[i]); -		i40e_clean_rx_ring(&vsi->rx_rings[i]); +		i40e_clean_tx_ring(vsi->tx_rings[i]); +		i40e_clean_rx_ring(vsi->rx_rings[i]);  	}  } @@ -3852,15 +4405,48 @@ static int i40e_open(struct net_device *netdev)  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back; -	char int_name[IFNAMSIZ];  	int err; -	/* disallow open during test */ -	if (test_bit(__I40E_TESTING, &pf->state)) +	/* disallow open during test or if eeprom is broken */ +	if (test_bit(__I40E_TESTING, &pf->state) || +	    test_bit(__I40E_BAD_EEPROM, &pf->state))  		return -EBUSY;  	netif_carrier_off(netdev); +	err = i40e_vsi_open(vsi); +	if (err) +		return err; + +	/* configure global TSO hardware offload settings */ +	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | +						       TCP_FLAG_FIN) >> 16); +	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | +						       TCP_FLAG_FIN | +						       TCP_FLAG_CWR) >> 16); +	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); + +#ifdef CONFIG_I40E_VXLAN +	vxlan_get_rx_port(netdev); +#endif + +	return 0; +} + +/** + * i40e_vsi_open - + * @vsi: the VSI to open + * + * Finish initialization of the VSI. 
+ * + * Returns 0 on success, negative value on failure + **/ +int i40e_vsi_open(struct i40e_vsi *vsi) +{ +	struct i40e_pf *pf = vsi->back; +	char int_name[IFNAMSIZ]; +	int err; +  	/* allocate descriptors */  	err = i40e_vsi_setup_tx_resources(vsi);  	if (err) @@ -3873,28 +4459,42 @@ static int i40e_open(struct net_device *netdev)  	if (err)  		goto err_setup_rx; -	snprintf(int_name, sizeof(int_name) - 1, "%s-%s", -		 dev_driver_string(&pf->pdev->dev), netdev->name); -	err = i40e_vsi_request_irq(vsi, int_name); -	if (err) +	if (vsi->netdev) { +		snprintf(int_name, sizeof(int_name) - 1, "%s-%s", +			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); +		err = i40e_vsi_request_irq(vsi, int_name); +		if (err) +			goto err_setup_rx; + +		/* Notify the stack of the actual queue counts. */ +		err = netif_set_real_num_tx_queues(vsi->netdev, +						   vsi->num_queue_pairs); +		if (err) +			goto err_set_queues; + +		err = netif_set_real_num_rx_queues(vsi->netdev, +						   vsi->num_queue_pairs); +		if (err) +			goto err_set_queues; + +	} else if (vsi->type == I40E_VSI_FDIR) { +		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", +			 dev_driver_string(&pf->pdev->dev)); +		err = i40e_vsi_request_irq(vsi, int_name); +	} else { +		err = -EINVAL;  		goto err_setup_rx; +	}  	err = i40e_up_complete(vsi);  	if (err)  		goto err_up_complete; -	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) { -		err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL); -		if (err) -			netdev_info(netdev, -				    "couldn't set broadcast err %d aq_err %d\n", -				    err, pf->hw.aq.asq_last_status); -	} -  	return 0;  err_up_complete:  	i40e_down(vsi); +err_set_queues:  	i40e_vsi_free_irq(vsi);  err_setup_rx:  	i40e_vsi_free_rx_resources(vsi); @@ -3907,6 +4507,26 @@ err_setup_tx:  }  /** + * i40e_fdir_filter_exit - Cleans up the Flow Director accounting + * @pf: Pointer to pf + * + * This function destroys the hlist where all the Flow Director + * filters were saved. 
+ **/ +static void i40e_fdir_filter_exit(struct i40e_pf *pf) +{ +	struct i40e_fdir_filter *filter; +	struct hlist_node *node2; + +	hlist_for_each_entry_safe(filter, node2, +				  &pf->fdir_filter_list, fdir_node) { +		hlist_del(&filter->fdir_node); +		kfree(filter); +	} +	pf->fdir_pf_active_filters = 0; +} + +/**   * i40e_close - Disables a network interface   * @netdev: network interface device structure   * @@ -3921,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi; -	if (test_and_set_bit(__I40E_DOWN, &vsi->state)) -		return 0; - -	i40e_down(vsi); -	i40e_vsi_free_irq(vsi); - -	i40e_vsi_free_tx_resources(vsi); -	i40e_vsi_free_rx_resources(vsi); +	i40e_vsi_close(vsi);  	return 0;  } @@ -3948,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)  	WARN_ON(in_interrupt()); +	if (i40e_check_asq_alive(&pf->hw)) +		i40e_vc_notify_reset(pf); +  	/* do the biggest reset indicated */  	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) { @@ -3959,7 +4575,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)  		 * for the warning interrupt will deal with the shutdown  		 * and recovery of the switch setup.  		 
*/ -		dev_info(&pf->pdev->dev, "GlobalR requested\n"); +		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");  		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);  		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;  		wr32(&pf->hw, I40E_GLGEN_RTRIG, val); @@ -3970,12 +4586,30 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)  		 *  		 * Same as Global Reset, except does *not* include the MAC/PHY  		 */ -		dev_info(&pf->pdev->dev, "CoreR requested\n"); +		dev_dbg(&pf->pdev->dev, "CoreR requested\n");  		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);  		val |= I40E_GLGEN_RTRIG_CORER_MASK;  		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);  		i40e_flush(&pf->hw); +	} else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { + +		/* Request a Firmware Reset +		 * +		 * Same as Global reset, plus restarting the +		 * embedded firmware engine. +		 */ +		/* enable EMP Reset */ +		val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); +		val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; +		wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); + +		/* force the reset */ +		val = rd32(&pf->hw, I40E_GLGEN_RTRIG); +		val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; +		wr32(&pf->hw, I40E_GLGEN_RTRIG, val); +		i40e_flush(&pf->hw); +  	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {  		/* Request a PF Reset @@ -3986,7 +4620,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)  		 * the switch, since we need to do all the recovery as  		 * for the Core Reset.  		 
*/ -		dev_info(&pf->pdev->dev, "PFR requested\n"); +		dev_dbg(&pf->pdev->dev, "PFR requested\n");  		i40e_handle_reset_warning(pf);  	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { @@ -3995,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)  		/* Find the VSI(s) that requested a re-init */  		dev_info(&pf->pdev->dev,  			 "VSI reinit requested\n"); -		for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +		for (v = 0; v < pf->num_alloc_vsi; v++) {  			struct i40e_vsi *vsi = pf->vsi[v];  			if (vsi != NULL &&  			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) { @@ -4013,6 +4647,154 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)  	}  } +#ifdef CONFIG_I40E_DCB +/** + * i40e_dcb_need_reconfig - Check if DCB needs reconfig + * @pf: board private structure + * @old_cfg: current DCB config + * @new_cfg: new DCB config + **/ +bool i40e_dcb_need_reconfig(struct i40e_pf *pf, +			    struct i40e_dcbx_config *old_cfg, +			    struct i40e_dcbx_config *new_cfg) +{ +	bool need_reconfig = false; + +	/* Check if ETS configuration has changed */ +	if (memcmp(&new_cfg->etscfg, +		   &old_cfg->etscfg, +		   sizeof(new_cfg->etscfg))) { +		/* If Priority Table has changed reconfig is needed */ +		if (memcmp(&new_cfg->etscfg.prioritytable, +			   &old_cfg->etscfg.prioritytable, +			   sizeof(new_cfg->etscfg.prioritytable))) { +			need_reconfig = true; +			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); +		} + +		if (memcmp(&new_cfg->etscfg.tcbwtable, +			   &old_cfg->etscfg.tcbwtable, +			   sizeof(new_cfg->etscfg.tcbwtable))) +			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); + +		if (memcmp(&new_cfg->etscfg.tsatable, +			   &old_cfg->etscfg.tsatable, +			   sizeof(new_cfg->etscfg.tsatable))) +			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); +	} + +	/* Check if PFC configuration has changed */ +	if (memcmp(&new_cfg->pfc, +		   &old_cfg->pfc, +		   sizeof(new_cfg->pfc))) { +		need_reconfig = true; +		dev_dbg(&pf->pdev->dev, "PFC 
config change detected.\n"); +	} + +	/* Check if APP Table has changed */ +	if (memcmp(&new_cfg->app, +		   &old_cfg->app, +		   sizeof(new_cfg->app))) { +		need_reconfig = true; +		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); +	} + +	return need_reconfig; +} + +/** + * i40e_handle_lldp_event - Handle LLDP Change MIB event + * @pf: board private structure + * @e: event info posted on ARQ + **/ +static int i40e_handle_lldp_event(struct i40e_pf *pf, +				  struct i40e_arq_event_info *e) +{ +	struct i40e_aqc_lldp_get_mib *mib = +		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; +	struct i40e_hw *hw = &pf->hw; +	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; +	struct i40e_dcbx_config tmp_dcbx_cfg; +	bool need_reconfig = false; +	int ret = 0; +	u8 type; + +	/* Not DCB capable or capability disabled */ +	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) +		return ret; + +	/* Ignore if event is not for Nearest Bridge */ +	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) +		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK); +	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) +		return ret; + +	/* Check MIB Type and return if event for Remote MIB update */ +	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; +	if (type == I40E_AQ_LLDP_MIB_REMOTE) { +		/* Update the remote cached instance and return */ +		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, +				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, +				&hw->remote_dcbx_config); +		goto exit; +	} + +	/* Convert/store the DCBX data from LLDPDU temporarily */ +	memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); +	ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg); +	if (ret) { +		/* Error in LLDPDU parsing return */ +		dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n"); +		goto exit; +	} + +	/* No change detected in DCBX configs */ +	if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { +		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); +		goto exit; +	} + 
+	need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg); + +	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg); + +	/* Overwrite the new configuration */ +	*dcbx_cfg = tmp_dcbx_cfg; + +	if (!need_reconfig) +		goto exit; + +	/* Enable DCB tagging only when more than one TC */ +	if (i40e_dcb_get_num_tc(dcbx_cfg) > 1) +		pf->flags |= I40E_FLAG_DCB_ENABLED; +	else +		pf->flags &= ~I40E_FLAG_DCB_ENABLED; + +	/* Reconfiguration needed quiesce all VSIs */ +	i40e_pf_quiesce_all_vsi(pf); + +	/* Changes in configuration update VEB/VSI */ +	i40e_dcb_reconfigure(pf); + +	i40e_pf_unquiesce_all_vsi(pf); +exit: +	return ret; +} +#endif /* CONFIG_I40E_DCB */ + +/** + * i40e_do_reset_safe - Protected reset path for userland calls. + * @pf: board private structure + * @reset_flags: which reset is requested + * + **/ +void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) +{ +	rtnl_lock(); +	i40e_do_reset(pf, reset_flags); +	rtnl_unlock(); +} +  /**   * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event   * @pf: board private structure @@ -4032,8 +4814,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,  	struct i40e_vf *vf;  	u16 vf_id; -	dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", -		 __func__, queue, qtx_ctl); +	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", +		queue, qtx_ctl);  	/* Queue belongs to VF, find the VF and issue VF reset */  	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) @@ -4058,11 +4840,58 @@ static void i40e_service_event_complete(struct i40e_pf *pf)  	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));  	/* flush memory to make sure state is correct before next watchog */ -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(__I40E_SERVICE_SCHED, &pf->state);  }  /** + * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW + * @pf: board private structure + **/ +int i40e_get_current_fd_count(struct i40e_pf *pf) +{ +	int val, 
fcnt_prog; +	val = rd32(&pf->hw, I40E_PFQF_FDSTAT); +	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + +		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> +		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); +	return fcnt_prog; +} + +/** + * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled + * @pf: board private structure + **/ +void i40e_fdir_check_and_reenable(struct i40e_pf *pf) +{ +	u32 fcnt_prog, fcnt_avail; + +	/* Check if, FD SB or ATR was auto disabled and if there is enough room +	 * to re-enable +	 */ +	if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && +	    (pf->flags & I40E_FLAG_FD_SB_ENABLED)) +		return; +	fcnt_prog = i40e_get_current_fd_count(pf); +	fcnt_avail = i40e_get_fd_cnt_all(pf); +	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { +		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && +		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { +			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; +			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); +		} +	} +	/* Wait for some more space to be available to turn on ATR */ +	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) { +		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && +		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { +			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; +			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); +		} +	} +} + +/**   * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table   * @pf: board private structure   **/ @@ -4071,11 +4900,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)  	if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))  		return; -	pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; -  	/* if interface is down do nothing */  	if (test_bit(__I40E_DOWN, &pf->state))  		return; +	i40e_fdir_check_and_reenable(pf); + +	if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && +	    (pf->flags & 
I40E_FLAG_FD_SB_ENABLED)) +		pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;  }  /** @@ -4134,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)  			i40e_veb_link_event(pf->veb[i], link_up);  	/* ... now the local VSIs */ -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +	for (i = 0; i < pf->num_alloc_vsi; i++)  		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))  			i40e_vsi_link_event(pf->vsi[i], link_up);  } @@ -4152,9 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)  	if (new_link == old_link)  		return; - -	netdev_info(pf->vsi[pf->lan_vsi]->netdev, -		    "NIC Link is %s\n", (new_link ? "Up" : "Down")); +	if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) +		i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);  	/* Notify the base of the switch tree connected to  	 * the link.  Floating VEBs are not notified. @@ -4166,6 +4997,9 @@ static void i40e_link_event(struct i40e_pf *pf)  	if (pf->vf)  		i40e_vc_notify_link_state(pf); + +	if (pf->flags & I40E_FLAG_PTP) +		i40e_ptp_set_increment(pf);  }  /** @@ -4189,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)  	 *     for each q_vector  	 *         force an interrupt  	 */ -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		struct i40e_vsi *vsi = pf->vsi[v];  		int armed = 0; @@ -4199,9 +5033,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)  			continue;  		for (i = 0; i < vsi->num_queue_pairs; i++) { -			set_check_for_tx_hang(&vsi->tx_rings[i]); +			set_check_for_tx_hang(vsi->tx_rings[i]);  			if (test_bit(__I40E_HANG_CHECK_ARMED, -				     &vsi->tx_rings[i].state)) +				     &vsi->tx_rings[i]->state))  				armed++;  		} @@ -4239,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)  	/* Update the stats for active netdevs so the network stack  	 * can look at updated numbers whenever it cares to  	 */ -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +	for (i = 0; i 
< pf->num_alloc_vsi; i++)  		if (pf->vsi[i] && pf->vsi[i]->netdev)  			i40e_update_stats(pf->vsi[i]); @@ -4247,6 +5081,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)  	for (i = 0; i < I40E_MAX_VEB; i++)  		if (pf->veb[i])  			i40e_update_veb_stats(pf->veb[i]); + +	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);  }  /** @@ -4257,6 +5093,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)  {  	u32 reset_flags = 0; +	rtnl_lock();  	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {  		reset_flags |= (1 << __I40E_REINIT_REQUESTED);  		clear_bit(__I40E_REINIT_REQUESTED, &pf->state); @@ -4279,7 +5116,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)  	 */  	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {  		i40e_handle_reset_warning(pf); -		return; +		goto unlock;  	}  	/* If we're already down or resetting, just bail */ @@ -4287,6 +5124,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)  	    !test_bit(__I40E_DOWN, &pf->state) &&  	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))  		i40e_do_reset(pf, reset_flags); + +unlock: +	rtnl_unlock();  }  /** @@ -4339,17 +5179,54 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)  	u16 pending, i = 0;  	i40e_status ret;  	u16 opcode; +	u32 oldval;  	u32 val;  	if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))  		return; +	/* check for error indications */ +	val = rd32(&pf->hw, pf->hw.aq.arq.len); +	oldval = val; +	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) { +		dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); +		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK; +	} +	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) { +		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); +		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; +	} +	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { +		dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); +		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK; +	} +	if (oldval != val) +		wr32(&pf->hw, pf->hw.aq.arq.len, val); + +	val = rd32(&pf->hw, pf->hw.aq.asq.len); +	oldval = val; +	if (val & 
I40E_PF_ATQLEN_ATQVFE_MASK) { +		dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); +		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK; +	} +	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) { +		dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); +		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK; +	} +	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) { +		dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); +		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK; +	} +	if (oldval != val) +		wr32(&pf->hw, pf->hw.aq.asq.len, val); +  	event.msg_size = I40E_MAX_AQ_BUF_SIZE;  	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);  	if (!event.msg_buf)  		return;  	do { +		event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */  		ret = i40e_clean_arq_element(hw, &event, &pending);  		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {  			dev_info(&pf->pdev->dev, "No ARQ event found\n"); @@ -4374,16 +5251,24 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)  					event.msg_size);  			break;  		case i40e_aqc_opc_lldp_update_mib: -			dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); +			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); +#ifdef CONFIG_I40E_DCB +			rtnl_lock(); +			ret = i40e_handle_lldp_event(pf, &event); +			rtnl_unlock(); +#endif /* CONFIG_I40E_DCB */  			break;  		case i40e_aqc_opc_event_lan_overflow: -			dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); +			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");  			i40e_handle_lan_overflow_event(pf, &event);  			break; +		case i40e_aqc_opc_send_msg_to_peer: +			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); +			break;  		default:  			dev_info(&pf->pdev->dev, -				 "ARQ Error: Unknown event %d received\n", -				 event.desc.opcode); +				 "ARQ Error: Unknown event 0x%04x received\n", +				 opcode);  			break;  		}  	} while (pending && (i++ < pf->adminq_work_limit)); @@ -4399,6 +5284,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)  }  /** + * 
i40e_verify_eeprom - make sure eeprom is good to use + * @pf: board private structure + **/ +static void i40e_verify_eeprom(struct i40e_pf *pf) +{ +	int err; + +	err = i40e_diag_eeprom_test(&pf->hw); +	if (err) { +		/* retry in case of garbage read */ +		err = i40e_diag_eeprom_test(&pf->hw); +		if (err) { +			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", +				 err); +			set_bit(__I40E_BAD_EEPROM, &pf->state); +		} +	} + +	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { +		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); +		clear_bit(__I40E_BAD_EEPROM, &pf->state); +	} +} + +/**   * i40e_reconstitute_veb - rebuild the VEB and anything connected to it   * @veb: pointer to the VEB instance   * @@ -4415,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)  	int ret;  	/* build VSI that owns this VEB, temporarily attached to base VEB */ -	for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) { +	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {  		if (pf->vsi[v] &&  		    pf->vsi[v]->veb_idx == veb->idx &&  		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { @@ -4445,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)  		goto end_reconstitute;  	/* create the remaining VSIs attached to this VEB */ -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)  			continue; @@ -4513,6 +5423,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)  		}  	} while (err); +	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || +	    (pf->hw.aq.fw_maj_ver < 2)) { +		pf->hw.func_caps.num_msix_vectors++; +		pf->hw.func_caps.num_msix_vectors_vf++; +	} +  	if (pf->hw.debug_mask & I40E_DEBUG_USER)  		dev_info(&pf->pdev->dev,  			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", @@ -4524,56 +5440,53 @@ static int i40e_get_capabilities(struct 
i40e_pf *pf)  			 pf->hw.func_caps.num_tx_qp,  			 pf->hw.func_caps.num_vsis); +#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ +		       + pf->hw.func_caps.num_vfs) +	if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { +		dev_info(&pf->pdev->dev, +			 "got num_vsis %d, setting num_vsis to %d\n", +			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); +		pf->hw.func_caps.num_vsis = DEF_NUM_VSI; +	} +  	return 0;  } +static int i40e_vsi_clear(struct i40e_vsi *vsi); +  /** - * i40e_fdir_setup - initialize the Flow Director resources + * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband   * @pf: board private structure   **/ -static void i40e_fdir_setup(struct i40e_pf *pf) +static void i40e_fdir_sb_setup(struct i40e_pf *pf)  {  	struct i40e_vsi *vsi; -	bool new_vsi = false; -	int err, i; +	int i; -	if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED))) +	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))  		return; -	pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - -	/* find existing or make new FDIR VSI */ +	/* find existing VSI and see if it needs configuring */  	vsi = NULL; -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) -		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) +	for (i = 0; i < pf->num_alloc_vsi; i++) { +		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {  			vsi = pf->vsi[i]; +			break; +		} +	} + +	/* create a new VSI if none exists */  	if (!vsi) { -		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0); +		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, +				     pf->vsi[pf->lan_vsi]->seid, 0);  		if (!vsi) {  			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); -			pf->flags &= ~I40E_FLAG_FDIR_ENABLED; +			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;  			return;  		} -		new_vsi = true; -	} -	WARN_ON(vsi->base_queue != I40E_FDIR_RING); -	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings); - -	err = i40e_vsi_setup_tx_resources(vsi); -	if (!err) -		err = 
i40e_vsi_setup_rx_resources(vsi); -	if (!err) -		err = i40e_vsi_configure(vsi); -	if (!err && new_vsi) { -		char int_name[IFNAMSIZ + 9]; -		snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", -			 dev_driver_string(&pf->pdev->dev)); -		err = i40e_vsi_request_irq(vsi, int_name);  	} -	if (!err) -		err = i40e_up_complete(vsi); -	clear_bit(__I40E_NEEDS_RESTART, &vsi->state); +	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);  }  /** @@ -4584,7 +5497,8 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)  {  	int i; -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { +	i40e_fdir_filter_exit(pf); +	for (i = 0; i < pf->num_alloc_vsi; i++) {  		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {  			i40e_vsi_release(pf->vsi[i]);  			break; @@ -4593,49 +5507,86 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)  }  /** - * i40e_handle_reset_warning - prep for the core to reset + * i40e_prep_for_reset - prep for the core to reset   * @pf: board private structure   * - * Close up the VFs and other things in prep for a Core Reset, - * then get ready to rebuild the world. - **/ -static void i40e_handle_reset_warning(struct i40e_pf *pf) + * Close up the VFs and other things in prep for pf Reset. 
+  **/ +static int i40e_prep_for_reset(struct i40e_pf *pf)  { -	struct i40e_driver_version dv;  	struct i40e_hw *hw = &pf->hw; -	i40e_status ret; +	i40e_status ret = 0;  	u32 v;  	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);  	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) -		return; - -	dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); +		return 0; -	i40e_vc_notify_reset(pf); +	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");  	/* quiesce the VSIs and their queues that are not already DOWN */  	i40e_pf_quiesce_all_vsi(pf); -	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { +	for (v = 0; v < pf->num_alloc_vsi; v++) {  		if (pf->vsi[v])  			pf->vsi[v]->seid = 0;  	}  	i40e_shutdown_adminq(&pf->hw); +	/* call shutdown HMC */ +	if (hw->hmc.hmc_obj) { +		ret = i40e_shutdown_lan_hmc(hw); +		if (ret) { +			dev_warn(&pf->pdev->dev, +				 "shutdown_lan_hmc failed: %d\n", ret); +			clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); +		} +	} +	return ret; +} + +/** + * i40e_send_version - update firmware with driver version + * @pf: PF struct + */ +static void i40e_send_version(struct i40e_pf *pf) +{ +	struct i40e_driver_version dv; + +	dv.major_version = DRV_VERSION_MAJOR; +	dv.minor_version = DRV_VERSION_MINOR; +	dv.build_version = DRV_VERSION_BUILD; +	dv.subbuild_version = 0; +	strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); +	i40e_aq_send_driver_version(&pf->hw, &dv, NULL); +} + +/** + * i40e_reset_and_rebuild - reset and rebuild using a saved config + * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized. + **/ +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) +{ +	struct i40e_hw *hw = &pf->hw; +	i40e_status ret; +	u32 v; +  	/* Now we wait for GRST to settle out.  	 * We don't have to delete the VEBs or VSIs from the hw switch  	 * because the reset will make them disappear.  	 
*/  	ret = i40e_pf_reset(hw); -	if (ret) +	if (ret) {  		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); +		goto end_core_reset; +	}  	pf->pfr_count++;  	if (test_bit(__I40E_DOWN, &pf->state))  		goto end_core_reset; -	dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); +	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");  	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */  	ret = i40e_init_adminq(&pf->hw); @@ -4644,6 +5595,13 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)  		goto end_core_reset;  	} +	/* re-verify the eeprom if we just had an EMP reset */ +	if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { +		clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); +		i40e_verify_eeprom(pf); +	} + +	i40e_clear_pxe_mode(hw);  	ret = i40e_get_capabilities(pf);  	if (ret) {  		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", @@ -4651,13 +5609,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)  		goto end_core_reset;  	} -	/* call shutdown HMC */ -	ret = i40e_shutdown_lan_hmc(hw); -	if (ret) { -		dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret); -		goto end_core_reset; -	} -  	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,  				hw->func_caps.num_rx_qp,  				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); @@ -4671,8 +5622,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)  		goto end_core_reset;  	} +#ifdef CONFIG_I40E_DCB +	ret = i40e_init_pf_dcb(pf); +	if (ret) { +		dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret); +		goto end_core_reset; +	} +#endif /* CONFIG_I40E_DCB */ +  	/* do basic switch setup */ -	ret = i40e_setup_pf_switch(pf); +	ret = i40e_setup_pf_switch(pf, reinit);  	if (ret)  		goto end_core_reset; @@ -4684,7 +5643,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)  	 * try to recover minimal use by getting the basic PF VSI working.  	 
*/  	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { -		dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); +		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");  		/* find the one VEB connected to the MAC, and find orphans */  		for (v = 0; v < I40E_MAX_VEB; v++) {  			if (!pf->veb[v]) @@ -4737,20 +5696,35 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)  	/* restart the VSIs that were rebuilt and running before the reset */  	i40e_pf_unquiesce_all_vsi(pf); -	/* tell the firmware that we're starting */ -	dv.major_version = DRV_VERSION_MAJOR; -	dv.minor_version = DRV_VERSION_MINOR; -	dv.build_version = DRV_VERSION_BUILD; -	dv.subbuild_version = 0; -	i40e_aq_send_driver_version(&pf->hw, &dv, NULL); +	if (pf->num_alloc_vfs) { +		for (v = 0; v < pf->num_alloc_vfs; v++) +			i40e_reset_vf(&pf->vf[v], true); +	} -	dev_info(&pf->pdev->dev, "PF reset done\n"); +	/* tell the firmware that we're starting */ +	i40e_send_version(pf);  end_core_reset:  	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);  }  /** + * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild + * @pf: board private structure + * + * Close up the VFs and other things in prep for a Core Reset, + * then get ready to rebuild the world. 
+ **/ +static void i40e_handle_reset_warning(struct i40e_pf *pf) +{ +	i40e_status ret; + +	ret = i40e_prep_for_reset(pf); +	if (!ret) +		i40e_reset_and_rebuild(pf, false); +} + +/**   * i40e_handle_mdd_event   * @pf: pointer to the pf structure   * @@ -4777,7 +5751,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)  		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)  				>> I40E_GL_MDET_TX_QUEUE_SHIFT;  		dev_info(&pf->pdev->dev, -			 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", +			 "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",  			 event, queue, func);  		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);  		mdd_detected = true; @@ -4791,7 +5765,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)  		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)  				>> I40E_GL_MDET_RX_QUEUE_SHIFT;  		dev_info(&pf->pdev->dev, -			 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", +			 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",  			 event, queue, func);  		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);  		mdd_detected = true; @@ -4831,6 +5805,50 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)  	i40e_flush(hw);  } +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW + * @pf: board private structure + **/ +static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) +{ +	struct i40e_hw *hw = &pf->hw; +	i40e_status ret; +	u8 filter_index; +	__be16 port; +	int i; + +	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) +		return; + +	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; + +	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { +		if (pf->pending_vxlan_bitmap & (1 << i)) { +			pf->pending_vxlan_bitmap &= ~(1 << i); +			port = pf->vxlan_ports[i]; +			ret = port ? 
+			      i40e_aq_add_udp_tunnel(hw, ntohs(port), +						     I40E_AQC_TUNNEL_TYPE_VXLAN, +						     &filter_index, NULL) +			      : i40e_aq_del_udp_tunnel(hw, i, NULL); + +			if (ret) { +				dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n", +					 port ? "adding" : "deleting", +					 ntohs(port), port ? i : i); + +				pf->vxlan_ports[i] = 0; +			} else { +				dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n", +					 port ? "Added" : "Deleted", +					 ntohs(port), port ? i : filter_index); +			} +		} +	} +} + +#endif  /**   * i40e_service_task - Run the driver's async subtasks   * @work: pointer to work_struct containing our data @@ -4849,6 +5867,9 @@ static void i40e_service_task(struct work_struct *work)  	i40e_fdir_reinit_subtask(pf);  	i40e_check_hang_subtask(pf);  	i40e_sync_filters_subtask(pf); +#ifdef CONFIG_I40E_VXLAN +	i40e_sync_vxlan_filters_subtask(pf); +#endif  	i40e_clean_adminq_subtask(pf);  	i40e_service_event_complete(pf); @@ -4926,6 +5947,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)  }  /** + * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi + * @type: VSI pointer + * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
+ * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) +{ +	int size; +	int ret = 0; + +	/* allocate memory for both Tx and Rx ring pointers */ +	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2; +	vsi->tx_rings = kzalloc(size, GFP_KERNEL); +	if (!vsi->tx_rings) +		return -ENOMEM; +	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs]; + +	if (alloc_qvectors) { +		/* allocate memory for q_vector pointers */ +		size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors; +		vsi->q_vectors = kzalloc(size, GFP_KERNEL); +		if (!vsi->q_vectors) { +			ret = -ENOMEM; +			goto err_vectors; +		} +	} +	return ret; + +err_vectors: +	kfree(vsi->tx_rings); +	return ret; +} + +/**   * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF   * @pf: board private structure   * @type: type of VSI @@ -4950,26 +6007,26 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)  	 * find next empty vsi slot, looping back around if necessary  	 */  	i = pf->next_vsi; -	while (i < pf->hw.func_caps.num_vsis && pf->vsi[i]) +	while (i < pf->num_alloc_vsi && pf->vsi[i])  		i++; -	if (i >= pf->hw.func_caps.num_vsis) { +	if (i >= pf->num_alloc_vsi) {  		i = 0;  		while (i < pf->next_vsi && pf->vsi[i])  			i++;  	} -	if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) { +	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {  		vsi_idx = i;             /* Found one! */  	} else {  		ret = -ENODEV; -		goto err_alloc_vsi;  /* out of VSI slots! */ +		goto unlock_pf;  /* out of VSI slots! 
*/  	}  	pf->next_vsi = ++i;  	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);  	if (!vsi) {  		ret = -ENOMEM; -		goto err_alloc_vsi; +		goto unlock_pf;  	}  	vsi->type = type;  	vsi->back = pf; @@ -4981,20 +6038,52 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)  	vsi->netdev_registered = false;  	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;  	INIT_LIST_HEAD(&vsi->mac_filter_list); +	vsi->irqs_ready = false; -	i40e_set_num_rings_in_vsi(vsi); +	ret = i40e_set_num_rings_in_vsi(vsi); +	if (ret) +		goto err_rings; + +	ret = i40e_vsi_alloc_arrays(vsi, true); +	if (ret) +		goto err_rings;  	/* Setup default MSIX irq handler for VSI */  	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);  	pf->vsi[vsi_idx] = vsi;  	ret = vsi_idx; -err_alloc_vsi: +	goto unlock_pf; + +err_rings: +	pf->next_vsi = i - 1; +	kfree(vsi); +unlock_pf:  	mutex_unlock(&pf->switch_mutex);  	return ret;  }  /** + * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI + * @type: VSI pointer + * @free_qvectors: a bool to specify if q_vectors need to be freed. 
+ * + * On error: returns error code (negative) + * On success: returns 0 + **/ +static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) +{ +	/* free the ring and vector containers */ +	if (free_qvectors) { +		kfree(vsi->q_vectors); +		vsi->q_vectors = NULL; +	} +	kfree(vsi->tx_rings); +	vsi->tx_rings = NULL; +	vsi->rx_rings = NULL; +} + +/**   * i40e_vsi_clear - Deallocate the VSI provided   * @vsi: the VSI being un-configured   **/ @@ -5030,6 +6119,8 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)  	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);  	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); +	i40e_vsi_free_arrays(vsi, true); +  	pf->vsi[vsi->idx] = NULL;  	if (vsi->idx < pf->next_vsi)  		pf->next_vsi = vsi->idx; @@ -5043,34 +6134,38 @@ free_vsi:  }  /** + * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI + * @vsi: the VSI being cleaned + **/ +static void i40e_vsi_clear_rings(struct i40e_vsi *vsi) +{ +	int i; + +	if (vsi->tx_rings && vsi->tx_rings[0]) { +		for (i = 0; i < vsi->alloc_queue_pairs; i++) { +			kfree_rcu(vsi->tx_rings[i], rcu); +			vsi->tx_rings[i] = NULL; +			vsi->rx_rings[i] = NULL; +		} +	} +} + +/**   * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI   * @vsi: the VSI being configured   **/  static int i40e_alloc_rings(struct i40e_vsi *vsi)  { +	struct i40e_ring *tx_ring, *rx_ring;  	struct i40e_pf *pf = vsi->back; -	int ret = 0;  	int i; -	vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs, -				sizeof(struct i40e_ring), GFP_KERNEL); -	if (!vsi->rx_rings) { -		ret = -ENOMEM; -		goto err_alloc_rings; -	} - -	vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs, -				sizeof(struct i40e_ring), GFP_KERNEL); -	if (!vsi->tx_rings) { -		ret = -ENOMEM; -		kfree(vsi->rx_rings); -		goto err_alloc_rings; -	} -  	/* Set basic values in the rings to be used later during open() */  	for (i = 0; i < vsi->alloc_queue_pairs; i++) { -		struct i40e_ring *rx_ring = &vsi->rx_rings[i]; -	
	struct i40e_ring *tx_ring = &vsi->tx_rings[i]; +		/* allocate space for both Tx and Rx in one shot */ +		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); +		if (!tx_ring) +			goto err_out;  		tx_ring->queue_index = i;  		tx_ring->reg_idx = vsi->base_queue + i; @@ -5081,7 +6176,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)  		tx_ring->count = vsi->num_desc;  		tx_ring->size = 0;  		tx_ring->dcb_tc = 0; +		vsi->tx_rings[i] = tx_ring; +		rx_ring = &tx_ring[1];  		rx_ring->queue_index = i;  		rx_ring->reg_idx = vsi->base_queue + i;  		rx_ring->ring_active = false; @@ -5095,24 +6192,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)  			set_ring_16byte_desc_enabled(rx_ring);  		else  			clear_ring_16byte_desc_enabled(rx_ring); -	} - -err_alloc_rings: -	return ret; -} - -/** - * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI - * @vsi: the VSI being cleaned - **/ -static int i40e_vsi_clear_rings(struct i40e_vsi *vsi) -{ -	if (vsi) { -		kfree(vsi->rx_rings); -		kfree(vsi->tx_rings); +		vsi->rx_rings[i] = rx_ring;  	}  	return 0; + +err_out: +	i40e_vsi_clear_rings(vsi); +	return -ENOMEM;  }  /** @@ -5124,34 +6211,11 @@ static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)   **/  static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)  { -	int err = 0; - -	pf->num_msix_entries = 0; -	while (vectors >= I40E_MIN_MSIX) { -		err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors); -		if (err == 0) { -			/* good to go */ -			pf->num_msix_entries = vectors; -			break; -		} else if (err < 0) { -			/* total failure */ -			dev_info(&pf->pdev->dev, -				 "MSI-X vector reservation failed: %d\n", err); -			vectors = 0; -			break; -		} else { -			/* err > 0 is the hint for retry */ -			dev_info(&pf->pdev->dev, -				 "MSI-X vectors wanted %d, retrying with %d\n", -				 vectors, err); -			vectors = err; -		} -	} - -	if (vectors > 0 && vectors < I40E_MIN_MSIX) { +	vectors = pci_enable_msix_range(pf->pdev, 
pf->msix_entries, +					I40E_MIN_MSIX, vectors); +	if (vectors < 0) {  		dev_info(&pf->pdev->dev, -			 "Couldn't get enough vectors, only %d available\n", -			 vectors); +			 "MSI-X vector reservation failed: %d\n", vectors);  		vectors = 0;  	} @@ -5179,19 +6243,22 @@ static int i40e_init_msix(struct i40e_pf *pf)  	/* The number of vectors we'll request will be comprised of:  	 *   - Add 1 for "other" cause for Admin Queue events, etc.  	 *   - The number of LAN queue pairs -	 *        already adjusted for the NUMA node -	 *        assumes symmetric Tx/Rx pairing +	 *	- Queues being used for RSS. +	 *		We don't need as many as max_rss_size vectors. +	 *		use rss_size instead in the calculation since that +	 *		is governed by number of cpus in the system. +	 *	- assumes symmetric Tx/Rx pairing  	 *   - The number of VMDq pairs  	 * Once we count this up, try the request.  	 *  	 * If we can't get what we want, we'll simplify to nearly nothing  	 * and try again.  If that still fails, we punt.  	 */ -	pf->num_lan_msix = pf->num_lan_qps; +	pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);  	pf->num_vmdq_msix = pf->num_vmdq_qps;  	v_budget = 1 + pf->num_lan_msix;  	v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); -	if (pf->flags & I40E_FLAG_FDIR_ENABLED) +	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)  		v_budget++;  	/* Scale down if necessary, and the rings will share vectors */ @@ -5205,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)  	for (i = 0; i < v_budget; i++)  		pf->msix_entries[i].entry = i;  	vec = i40e_reserve_msix_vectors(pf, v_budget); + +	if (vec != v_budget) { +		/* If we have limited resources, we will start with no vectors +		 * for the special features and then allocate vectors to some +		 * of these features based on the policy and at the end disable +		 * the features that did not get any vectors. 
+		 */ +		pf->num_vmdq_msix = 0; +	} +  	if (vec < I40E_MIN_MSIX) {  		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;  		kfree(pf->msix_entries); @@ -5213,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)  	} else if (vec == I40E_MIN_MSIX) {  		/* Adjust for minimal MSIX use */ -		dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); -		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;  		pf->num_vmdq_vsis = 0;  		pf->num_vmdq_qps = 0; -		pf->num_vmdq_msix = 0;  		pf->num_lan_qps = 1;  		pf->num_lan_msix = 1;  	} else if (vec != v_budget) { +		/* reserve the misc vector */ +		vec--; +  		/* Scale vector usage down */  		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */ -		vec--;                    /* reserve the misc vector */ +		pf->num_vmdq_vsis = 1;  		/* partition out the remaining vectors */  		switch (vec) {  		case 2: -			pf->num_vmdq_vsis = 1;  			pf->num_lan_msix = 1;  			break;  		case 3: -			pf->num_vmdq_vsis = 1;  			pf->num_lan_msix = 2;  			break;  		default: @@ -5245,20 +6320,58 @@ static int i40e_init_msix(struct i40e_pf *pf)  		}  	} +	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && +	    (pf->num_vmdq_msix == 0)) { +		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); +		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; +	}  	return err;  }  /** - * i40e_alloc_q_vectors - Allocate memory for interrupt vectors + * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector + * @vsi: the VSI being configured + * @v_idx: index of the vector in the vsi struct + * + * We allocate one q_vector.  If allocation fails we return -ENOMEM. 
+ **/ +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +{ +	struct i40e_q_vector *q_vector; + +	/* allocate q_vector */ +	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); +	if (!q_vector) +		return -ENOMEM; + +	q_vector->vsi = vsi; +	q_vector->v_idx = v_idx; +	cpumask_set_cpu(v_idx, &q_vector->affinity_mask); +	if (vsi->netdev) +		netif_napi_add(vsi->netdev, &q_vector->napi, +			       i40e_napi_poll, NAPI_POLL_WEIGHT); + +	q_vector->rx.latency_range = I40E_LOW_LATENCY; +	q_vector->tx.latency_range = I40E_LOW_LATENCY; + +	/* tie q_vector and vsi together */ +	vsi->q_vectors[v_idx] = q_vector; + +	return 0; +} + +/** + * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors   * @vsi: the VSI being configured   *   * We allocate one q_vector per queue interrupt.  If allocation fails we   * return -ENOMEM.   **/ -static int i40e_alloc_q_vectors(struct i40e_vsi *vsi) +static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)  {  	struct i40e_pf *pf = vsi->back;  	int v_idx, num_q_vectors; +	int err;  	/* if not MSIX, give the one vector only to the LAN VSI */  	if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@ -5268,22 +6381,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)  	else  		return -EINVAL; -	vsi->q_vectors = kcalloc(num_q_vectors, -				 sizeof(struct i40e_q_vector), -				 GFP_KERNEL); -	if (!vsi->q_vectors) -		return -ENOMEM; -  	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { -		vsi->q_vectors[v_idx].vsi = vsi; -		vsi->q_vectors[v_idx].v_idx = v_idx; -		cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask); -		if (vsi->netdev) -			netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi, -				       i40e_napi_poll, vsi->work_limit); +		err = i40e_vsi_alloc_q_vector(vsi, v_idx); +		if (err) +			goto err_out;  	}  	return 0; + +err_out: +	while (v_idx--) +		i40e_free_q_vector(vsi, v_idx); + +	return err;  }  /** @@ -5297,13 +6407,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)  	if 
(pf->flags & I40E_FLAG_MSIX_ENABLED) {  		err = i40e_init_msix(pf);  		if (err) { -			pf->flags &= ~(I40E_FLAG_RSS_ENABLED	   | -					I40E_FLAG_MQ_ENABLED	   | -					I40E_FLAG_DCB_ENABLED	   | -					I40E_FLAG_SRIOV_ENABLED	   | -					I40E_FLAG_FDIR_ENABLED	   | -					I40E_FLAG_FDIR_ATR_ENABLED | -					I40E_FLAG_VMDQ_ENABLED); +			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	| +				       I40E_FLAG_RSS_ENABLED	| +				       I40E_FLAG_DCB_CAPABLE	| +				       I40E_FLAG_SRIOV_ENABLED	| +				       I40E_FLAG_FD_SB_ENABLED	| +				       I40E_FLAG_FD_ATR_ENABLED	| +				       I40E_FLAG_VMDQ_ENABLED);  			/* rework the queue expectations without MSIX */  			i40e_determine_queue_usage(pf); @@ -5312,14 +6422,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)  	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&  	    (pf->flags & I40E_FLAG_MSI_ENABLED)) { +		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");  		err = pci_enable_msi(pf->pdev);  		if (err) { -			dev_info(&pf->pdev->dev, -				 "MSI init failed (%d), trying legacy.\n", err); +			dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);  			pf->flags &= ~I40E_FLAG_MSI_ENABLED;  		}  	} +	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) +		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); +  	/* track first vector for misc interrupts */  	err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);  } @@ -5345,7 +6458,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)  				  i40e_intr, 0, pf->misc_int_name, pf);  		if (err) {  			dev_info(&pf->pdev->dev, -				 "request_irq for msix_misc failed: %d\n", err); +				 "request_irq for %s failed: %d\n", +				 pf->misc_int_name, err);  			return -EFAULT;  		}  	} @@ -5369,15 +6483,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)   **/  static int i40e_config_rss(struct i40e_pf *pf)  { -	struct i40e_hw *hw = &pf->hw; -	u32 lut = 0; -	int i, j; -	u64 hena;  	/* Set of random keys 
generated using kernel random number generator */  	static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,  				0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,  				0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,  				0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; +	struct i40e_hw *hw = &pf->hw; +	u32 lut = 0; +	int i, j; +	u64 hena;  	/* Fill out hash function seed */  	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) @@ -5386,16 +6500,7 @@ static int i40e_config_rss(struct i40e_pf *pf)  	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */  	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |  		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); -	hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | -		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | -		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)| -		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); +	hena |= I40E_DEFAULT_RSS_HENA;  	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);  	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); @@ -5424,6 +6529,33 @@ static int i40e_config_rss(struct i40e_pf *pf)  }  /** + * i40e_reconfig_rss_queues - change number of queues for rss and rebuild + * @pf: board private structure + * @queue_count: the requested queue count for rss. + * + * returns 0 if rss is not enabled, if enabled returns the final rss queue + * count which may be different from the requested queue count. 
+ **/ +int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) +{ +	if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) +		return 0; + +	queue_count = min_t(int, queue_count, pf->rss_size_max); + +	if (queue_count != pf->rss_size) { +		i40e_prep_for_reset(pf); + +		pf->rss_size = queue_count; + +		i40e_reset_and_rebuild(pf, true); +		i40e_config_rss(pf); +	} +	dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size); +	return pf->rss_size; +} + +/**   * i40e_sw_init - Initialize general software structures (struct i40e_pf)   * @pf: board private structure to initialize   * @@ -5438,6 +6570,7 @@ static int i40e_sw_init(struct i40e_pf *pf)  	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,  				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); +	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;  	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {  		if (I40E_DEBUG_USER & debug)  			pf->hw.debug_mask = debug; @@ -5449,39 +6582,50 @@ static int i40e_sw_init(struct i40e_pf *pf)  	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |  		    I40E_FLAG_MSI_ENABLED     |  		    I40E_FLAG_MSIX_ENABLED    | -		    I40E_FLAG_RX_PS_ENABLED   | -		    I40E_FLAG_MQ_ENABLED      |  		    I40E_FLAG_RX_1BUF_ENABLED; +	/* Set default ITR */ +	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; +	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; + +	/* Depending on PF configurations, it is possible that the RSS +	 * maximum might end up larger than the available queues +	 */  	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; +	pf->rss_size_max = min_t(int, pf->rss_size_max, +				 pf->hw.func_caps.num_tx_qp);  	if (pf->hw.func_caps.rss) {  		pf->flags |= I40E_FLAG_RSS_ENABLED; -		pf->rss_size = min_t(int, pf->rss_size_max, -				     nr_cpus_node(numa_node_id())); +		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());  	} else {  		pf->rss_size = 1;  	} -	if (pf->hw.func_caps.dcb) -		pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC; -	else -		
pf->num_tc_qps = 0; +	/* MFP mode enabled */ +	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { +		pf->flags |= I40E_FLAG_MFP_ENABLED; +		dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); +	} -	if (pf->hw.func_caps.fd) { -		/* FW/NVM is not yet fixed in this regard */ -		if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || -		    (pf->hw.func_caps.fd_filters_best_effort > 0)) { -			pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED; -			dev_info(&pf->pdev->dev, -				 "Flow Director ATR mode Enabled\n"); -			pf->flags |= I40E_FLAG_FDIR_ENABLED; +	/* FW/NVM is not yet fixed in this regard */ +	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || +	    (pf->hw.func_caps.fd_filters_best_effort > 0)) { +		pf->flags |= I40E_FLAG_FD_ATR_ENABLED; +		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; +		/* Setup a counter for fd_atr per pf */ +		pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); +		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { +			pf->flags |= I40E_FLAG_FD_SB_ENABLED; +			/* Setup a counter for fd_sb per pf */ +			pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); +		} else {  			dev_info(&pf->pdev->dev, -				 "Flow Director Side Band mode Enabled\n"); -			pf->fdir_pf_filter_count = -					 pf->hw.func_caps.fd_filters_guaranteed; +				 "Flow Director Sideband mode Disabled in MFP mode\n");  		} -	} else { -		pf->fdir_pf_filter_count = 0; +		pf->fdir_pf_filter_count = +				 pf->hw.func_caps.fd_filters_guaranteed; +		pf->hw.fdir_shared_filter_count = +				 pf->hw.func_caps.fd_filters_best_effort;  	}  	if (pf->hw.func_caps.vmdq) { @@ -5490,12 +6634,6 @@ static int i40e_sw_init(struct i40e_pf *pf)  		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;  	} -	/* MFP mode enabled */ -	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { -		pf->flags |= I40E_FLAG_MFP_ENABLED; -		dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); -	} -  #ifdef CONFIG_PCI_IOV  	if (pf->hw.func_caps.num_vfs) {  		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; @@ 
-5539,6 +6677,39 @@ sw_init_done:  }  /** + * i40e_set_ntuple - set the ntuple feature flag and take action + * @pf: board private structure to initialize + * @features: the feature set that the stack is suggesting + * + * returns a bool to indicate if reset needs to happen + **/ +bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) +{ +	bool need_reset = false; + +	/* Check if Flow Director n-tuple support was enabled or disabled.  If +	 * the state changed, we need to reset. +	 */ +	if (features & NETIF_F_NTUPLE) { +		/* Enable filters and mark for reset */ +		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) +			need_reset = true; +		pf->flags |= I40E_FLAG_FD_SB_ENABLED; +	} else { +		/* turn off filters, mark for reset and clear SW filter list */ +		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { +			need_reset = true; +			i40e_fdir_filter_exit(pf); +		} +		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; +		/* if ATR was disabled it can be re-enabled. */ +		if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) +			pf->flags |= I40E_FLAG_FD_ATR_ENABLED; +	} +	return need_reset; +} + +/**   * i40e_set_features - set the netdev feature flags   * @netdev: ptr to the netdev being adjusted   * @features: the feature set that the stack is suggesting @@ -5548,15 +6719,210 @@ static int i40e_set_features(struct net_device *netdev,  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi; +	struct i40e_pf *pf = vsi->back; +	bool need_reset;  	if (features & NETIF_F_HW_VLAN_CTAG_RX)  		i40e_vlan_stripping_enable(vsi);  	else  		i40e_vlan_stripping_disable(vsi); +	need_reset = i40e_set_ntuple(pf, features); + +	if (need_reset) +		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); +  	return 0;  } +#ifdef CONFIG_I40E_VXLAN +/** + * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port + * @pf: board private structure + * @port: The UDP port to look up + * + * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found + 
**/ +static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) +{ +	u8 i; + +	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { +		if (pf->vxlan_ports[i] == port) +			return i; +	} + +	return i; +} + +/** + * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: New UDP port number that VXLAN started listening to + **/ +static void i40e_add_vxlan_port(struct net_device *netdev, +				sa_family_t sa_family, __be16 port) +{ +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_vsi *vsi = np->vsi; +	struct i40e_pf *pf = vsi->back; +	u8 next_idx; +	u8 idx; + +	if (sa_family == AF_INET6) +		return; + +	idx = i40e_get_vxlan_port_idx(pf, port); + +	/* Check if port already exists */ +	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { +		netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); +		return; +	} + +	/* Now check if there is space to add the new port */ +	next_idx = i40e_get_vxlan_port_idx(pf, 0); + +	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { +		netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", +			    ntohs(port)); +		return; +	} + +	/* New port: add it and mark its index in the bitmap */ +	pf->vxlan_ports[next_idx] = port; +	pf->pending_vxlan_bitmap |= (1 << next_idx); + +	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; +} + +/** + * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away + * @netdev: This physical port's netdev + * @sa_family: Socket Family that VXLAN is notifying us about + * @port: UDP port number that VXLAN stopped listening to + **/ +static void i40e_del_vxlan_port(struct net_device *netdev, +				sa_family_t sa_family, __be16 port) +{ +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_vsi *vsi = np->vsi; +	struct i40e_pf *pf = vsi->back; +	u8 idx; + +	if (sa_family == AF_INET6) +		return; + +	idx = 
i40e_get_vxlan_port_idx(pf, port); + +	/* Check if port already exists */ +	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { +		/* if port exists, set it to 0 (mark for deletion) +		 * and make it pending +		 */ +		pf->vxlan_ports[idx] = 0; + +		pf->pending_vxlan_bitmap |= (1 << idx); + +		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; +	} else { +		netdev_warn(netdev, "Port %d was not found, not deleting\n", +			    ntohs(port)); +	} +} + +#endif +#ifdef HAVE_FDB_OPS +#ifdef USE_CONST_DEV_UC_CHAR +static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], +			    struct net_device *dev, +			    const unsigned char *addr, +			    u16 flags) +#else +static int i40e_ndo_fdb_add(struct ndmsg *ndm, +			    struct net_device *dev, +			    unsigned char *addr, +			    u16 flags) +#endif +{ +	struct i40e_netdev_priv *np = netdev_priv(dev); +	struct i40e_pf *pf = np->vsi->back; +	int err = 0; + +	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) +		return -EOPNOTSUPP; + +	/* Hardware does not support aging addresses so if a +	 * ndm_state is given only allow permanent addresses +	 */ +	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { +		netdev_info(dev, "FDB only supports static addresses\n"); +		return -EINVAL; +	} + +	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) +		err = dev_uc_add_excl(dev, addr); +	else if (is_multicast_ether_addr(addr)) +		err = dev_mc_add_excl(dev, addr); +	else +		err = -EINVAL; + +	/* Only return duplicate errors if NLM_F_EXCL is set */ +	if (err == -EEXIST && !(flags & NLM_F_EXCL)) +		err = 0; + +	return err; +} + +#ifndef USE_DEFAULT_FDB_DEL_DUMP +#ifdef USE_CONST_DEV_UC_CHAR +static int i40e_ndo_fdb_del(struct ndmsg *ndm, +			    struct net_device *dev, +			    const unsigned char *addr) +#else +static int i40e_ndo_fdb_del(struct ndmsg *ndm, +			    struct net_device *dev, +			    unsigned char *addr) +#endif +{ +	struct i40e_netdev_priv *np = netdev_priv(dev); +	struct i40e_pf *pf = np->vsi->back; +	int err = 
-EOPNOTSUPP; + +	if (ndm->ndm_state & NUD_PERMANENT) { +		netdev_info(dev, "FDB only supports static addresses\n"); +		return -EINVAL; +	} + +	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { +		if (is_unicast_ether_addr(addr)) +			err = dev_uc_del(dev, addr); +		else if (is_multicast_ether_addr(addr)) +			err = dev_mc_del(dev, addr); +		else +			err = -EINVAL; +	} + +	return err; +} + +static int i40e_ndo_fdb_dump(struct sk_buff *skb, +			     struct netlink_callback *cb, +			     struct net_device *dev, +			     int idx) +{ +	struct i40e_netdev_priv *np = netdev_priv(dev); +	struct i40e_pf *pf = np->vsi->back; + +	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) +		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + +	return idx; +} + +#endif /* USE_DEFAULT_FDB_DEL_DUMP */ +#endif /* HAVE_FDB_OPS */  static const struct net_device_ops i40e_netdev_ops = {  	.ndo_open		= i40e_open,  	.ndo_stop		= i40e_close, @@ -5566,6 +6932,7 @@ static const struct net_device_ops i40e_netdev_ops = {  	.ndo_validate_addr	= eth_validate_addr,  	.ndo_set_mac_address	= i40e_set_mac,  	.ndo_change_mtu		= i40e_change_mtu, +	.ndo_do_ioctl		= i40e_ioctl,  	.ndo_tx_timeout		= i40e_tx_timeout,  	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,  	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid, @@ -5576,8 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {  	.ndo_set_features	= i40e_set_features,  	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,  	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan, -	.ndo_set_vf_tx_rate	= i40e_ndo_set_vf_bw, +	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,  	.ndo_get_vf_config	= i40e_ndo_get_vf_config, +	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state, +	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofck, +#ifdef CONFIG_I40E_VXLAN +	.ndo_add_vxlan_port	= i40e_add_vxlan_port, +	.ndo_del_vxlan_port	= i40e_del_vxlan_port, +#endif +#ifdef HAVE_FDB_OPS +	.ndo_fdb_add		= i40e_ndo_fdb_add, +#ifndef USE_DEFAULT_FDB_DEL_DUMP +	.ndo_fdb_del		= i40e_ndo_fdb_del, +	.ndo_fdb_dump		= i40e_ndo_fdb_dump, 
+#endif +#endif  };  /** @@ -5588,6 +6968,7 @@ static const struct net_device_ops i40e_netdev_ops = {   **/  static int i40e_config_netdev(struct i40e_vsi *vsi)  { +	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};  	struct i40e_pf *pf = vsi->back;  	struct i40e_hw *hw = &pf->hw;  	struct i40e_netdev_priv *np; @@ -5604,10 +6985,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  	np = netdev_priv(netdev);  	np->vsi = vsi; -	netdev->hw_enc_features = NETIF_F_IP_CSUM	 | +	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |  				  NETIF_F_GSO_UDP_TUNNEL | -				  NETIF_F_TSO		 | -				  NETIF_F_SG; +				  NETIF_F_TSO;  	netdev->features = NETIF_F_SG		       |  			   NETIF_F_IP_CSUM	       | @@ -5619,17 +6999,29 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  			   NETIF_F_HW_VLAN_CTAG_FILTER |  			   NETIF_F_IPV6_CSUM	       |  			   NETIF_F_TSO		       | +			   NETIF_F_TSO_ECN	       |  			   NETIF_F_TSO6		       |  			   NETIF_F_RXCSUM	       |  			   NETIF_F_RXHASH	       |  			   0; +	if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) +		netdev->features |= NETIF_F_NTUPLE; +  	/* copy netdev features into list of user selectable features */  	netdev->hw_features |= netdev->features;  	if (vsi->type == I40E_VSI_MAIN) {  		SET_NETDEV_DEV(netdev, &pf->pdev->dev); -		memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); +		ether_addr_copy(mac_addr, hw->mac.perm_addr); +		/* The following two steps are necessary to prevent reception +		 * of tagged packets - by default the NVM loads a MAC-VLAN +		 * filter that will accept any tagged packet.  This is to +		 * prevent that during normal operations until a specific +		 * VLAN tag filter has been set. 
+		 */ +		i40e_rm_default_mac_filter(vsi, mac_addr); +		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);  	} else {  		/* relate the VSI_VMDQ name to the VSI_MAIN name */  		snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -5637,9 +7029,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)  		random_ether_addr(mac_addr);  		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);  	} +	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); -	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN); -	memcpy(netdev->perm_addr, mac_addr, ETH_ALEN); +	ether_addr_copy(netdev->dev_addr, mac_addr); +	ether_addr_copy(netdev->perm_addr, mac_addr);  	/* vlan gets same features (except vlan offload)  	 * after any tweaks for specific VSI types  	 */ @@ -5670,12 +7063,7 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)  	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])  		return; -	/* there is no HW VSI for FDIR */ -	if (vsi->type == I40E_VSI_FDIR) -		return; -  	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); -	return;  }  /** @@ -5757,12 +7145,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  		break;  	case I40E_VSI_FDIR: -		/* no queue mapping or actual HW VSI needed */ -		vsi->info.valid_sections = 0; -		vsi->seid = 0; -		vsi->id = 0; +		ctxt.pf_num = hw->pf_id; +		ctxt.vf_num = 0; +		ctxt.uplink_seid = vsi->uplink_seid; +		ctxt.connection_type = 0x1;     /* regular data port */ +		ctxt.flags = I40E_AQ_VSI_TYPE_PF;  		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); -		return 0;  		break;  	case I40E_VSI_VMDQ2: @@ -5801,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)  		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);  		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; +		if (pf->vf[vsi->vf_id].spoofchk) { +			ctxt.info.valid_sections |= +				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); +			ctxt.info.sec_flags |= +				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | +				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); 
+		}  		/* Setup the VSI tx/rx queue map for TC0 only for now */  		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);  		break; @@ -5883,15 +7278,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi)  			if (vsi->netdev) {  				/* results in a call to i40e_close() */  				unregister_netdev(vsi->netdev); -				free_netdev(vsi->netdev); -				vsi->netdev = NULL;  			}  		} else { -			if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) -				i40e_down(vsi); -			i40e_vsi_free_irq(vsi); -			i40e_vsi_free_tx_resources(vsi); -			i40e_vsi_free_rx_resources(vsi); +			i40e_vsi_close(vsi);  		}  		i40e_vsi_disable_irq(vsi);  	} @@ -5903,6 +7292,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)  	i40e_vsi_delete(vsi);  	i40e_vsi_free_q_vectors(vsi); +	if (vsi->netdev) { +		free_netdev(vsi->netdev); +		vsi->netdev = NULL; +	}  	i40e_vsi_clear_rings(vsi);  	i40e_vsi_clear(vsi); @@ -5914,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)  	 * the orphan VEBs yet.  We'll wait for an explicit remove request  	 * from up the network stack.  	 
*/ -	for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) { +	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {  		if (pf->vsi[i] &&  		    pf->vsi[i]->uplink_seid == uplink_seid &&  		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { @@ -5950,20 +7343,19 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)  	int ret = -ENOENT;  	struct i40e_pf *pf = vsi->back; -	if (vsi->q_vectors) { +	if (vsi->q_vectors[0]) {  		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",  			 vsi->seid);  		return -EEXIST;  	}  	if (vsi->base_vector) { -		dev_info(&pf->pdev->dev, -			 "VSI %d has non-zero base vector %d\n", +		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",  			 vsi->seid, vsi->base_vector);  		return -EEXIST;  	} -	ret = i40e_alloc_q_vectors(vsi); +	ret = i40e_vsi_alloc_q_vectors(vsi);  	if (ret) {  		dev_info(&pf->pdev->dev,  			 "failed to allocate %d q_vector for VSI %d, ret=%d\n", @@ -5972,11 +7364,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)  		goto vector_setup_out;  	} -	vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, -					 vsi->num_q_vectors, vsi->idx); +	if (vsi->num_q_vectors) +		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, +						 vsi->num_q_vectors, vsi->idx);  	if (vsi->base_vector < 0) {  		dev_info(&pf->pdev->dev, -			 "failed to get q tracking for VSI %d, err=%d\n", +			 "failed to get queue tracking for VSI %d, err=%d\n",  			 vsi->seid, vsi->base_vector);  		i40e_vsi_free_q_vectors(vsi);  		ret = -ENOENT; @@ -5988,6 +7381,69 @@ vector_setup_out:  }  /** + * i40e_vsi_reinit_setup - return and reallocate resources for a VSI + * @vsi: pointer to the vsi. + * + * This re-allocates a vsi's queue resources. + * + * Returns pointer to the successfully allocated and configured VSI sw struct + * on success, otherwise returns NULL on failure. 
+ **/ +static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) +{ +	struct i40e_pf *pf = vsi->back; +	u8 enabled_tc; +	int ret; + +	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); +	i40e_vsi_clear_rings(vsi); + +	i40e_vsi_free_arrays(vsi, false); +	i40e_set_num_rings_in_vsi(vsi); +	ret = i40e_vsi_alloc_arrays(vsi, false); +	if (ret) +		goto err_vsi; + +	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); +	if (ret < 0) { +		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", +			 vsi->seid, ret); +		goto err_vsi; +	} +	vsi->base_queue = ret; + +	/* Update the FW view of the VSI. Force a reset of TC and queue +	 * layout configurations. +	 */ +	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; +	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; +	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; +	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + +	/* assign it some queues */ +	ret = i40e_alloc_rings(vsi); +	if (ret) +		goto err_rings; + +	/* map all of the rings to the q_vectors */ +	i40e_vsi_map_rings_to_vectors(vsi); +	return vsi; + +err_rings: +	i40e_vsi_free_q_vectors(vsi); +	if (vsi->netdev_registered) { +		vsi->netdev_registered = false; +		unregister_netdev(vsi->netdev); +		free_netdev(vsi->netdev); +		vsi->netdev = NULL; +	} +	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); +err_vsi: +	i40e_vsi_clear(vsi); +	return NULL; +} + +/**   * i40e_vsi_setup - Set up a VSI by a given type   * @pf: board private structure   * @type: VSI type @@ -6030,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,  	if (!veb && uplink_seid != pf->mac_seid) { -		for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { +		for (i = 0; i < pf->num_alloc_vsi; i++) {  			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {  				vsi = pf->vsi[i];  				break; @@ -6067,6 +7523,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,  	if (v_idx < 0)  		goto err_alloc;  	vsi = pf->vsi[v_idx]; +	if (!vsi) +		goto 
err_alloc;  	vsi->type = type;  	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); @@ -6075,7 +7533,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,  	else if (type == I40E_VSI_SRIOV)  		vsi->vf_id = param1;  	/* assign it some queues */ -	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); +	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, +				vsi->idx);  	if (ret < 0) {  		dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",  			 vsi->seid, ret); @@ -6101,6 +7560,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,  			goto err_netdev;  		vsi->netdev_registered = true;  		netif_carrier_off(vsi->netdev); +#ifdef CONFIG_I40E_DCB +		/* Setup DCB netlink interface */ +		i40e_dcbnl_setup(vsi); +#endif /* CONFIG_I40E_DCB */  		/* fall through */  	case I40E_VSI_FDIR: @@ -6266,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)  	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing  	 *       the VEB itself, so don't use (*branch) after this loop.  	 
*/ -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { +	for (i = 0; i < pf->num_alloc_vsi; i++) {  		if (!pf->vsi[i])  			continue;  		if (pf->vsi[i]->uplink_seid == branch_seid && @@ -6318,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)  	pf = veb->pf;  	/* find the remaining VSI and check for extras */ -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { +	for (i = 0; i < pf->num_alloc_vsi; i++) {  		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {  			n++;  			vsi = pf->vsi[i]; @@ -6347,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)  	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);  	i40e_veb_clear(veb); - -	return;  }  /** @@ -6358,12 +7819,14 @@ void i40e_veb_release(struct i40e_veb *veb)   **/  static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)  { -	bool is_default = (vsi->idx == vsi->back->lan_vsi); +	bool is_default = false; +	bool is_cloud = false;  	int ret;  	/* get a VEB from the hardware */  	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, -			      veb->enabled_tc, is_default, &veb->seid, NULL); +			      veb->enabled_tc, is_default, +			      is_cloud, &veb->seid, NULL);  	if (ret) {  		dev_info(&veb->pf->pdev->dev,  			 "couldn't add VEB, err %d, aq_err %d\n", @@ -6430,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,  	}  	/* make sure there is such a vsi and uplink */ -	for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++) +	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)  		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)  			break; -	if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) { +	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {  		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",  			 vsi_seid);  		return NULL; @@ -6468,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,  	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);  	if (ret)  		goto err_veb; +	if (vsi_idx == pf->lan_vsi) +		
pf->lan_veb = veb->idx;  	return veb; @@ -6603,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)  				 "header: %d reported %d total\n",  				 num_reported, num_total); -		if (num_reported) { -			int sz = sizeof(*sw_config) * num_reported; - -			kfree(pf->sw_config); -			pf->sw_config = kzalloc(sz, GFP_KERNEL); -			if (pf->sw_config) -				memcpy(pf->sw_config, sw_config, sz); -		} -  		for (i = 0; i < num_reported; i++) {  			struct i40e_aqc_switch_config_element_resp *ele =  				&sw_config->element[i]; @@ -6628,11 +8084,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)  /**   * i40e_setup_pf_switch - Setup the HW switch on startup or after reset   * @pf: board private structure + * @reinit: if the Main VSI needs to re-initialized.   *   * Returns 0 on success, negative value on failure   **/ -static int i40e_setup_pf_switch(struct i40e_pf *pf) +static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)  { +	u32 rxfc = 0, txfc = 0, rxfc_reg;  	int ret;  	/* find out what's out there already */ @@ -6645,14 +8103,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)  	}  	i40e_pf_reset_stats(pf); -	/* fdir VSI must happen first to be sure it gets queue 0, but only -	 * if there is enough room for the fdir VSI -	 */ -	if (pf->num_lan_qps > 1) -		i40e_fdir_setup(pf); -  	/* first time setup */ -	if (pf->lan_vsi == I40E_NO_VSI) { +	if (pf->lan_vsi == I40E_NO_VSI || reinit) {  		struct i40e_vsi *vsi = NULL;  		u16 uplink_seid; @@ -6663,19 +8115,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)  			uplink_seid = pf->veb[pf->lan_veb]->seid;  		else  			uplink_seid = pf->mac_seid; - -		vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); +		if (pf->lan_vsi == I40E_NO_VSI) +			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); +		else if (reinit) +			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);  		if (!vsi) {  			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");  			
i40e_fdir_teardown(pf);  			return -EAGAIN;  		} -		/* accommodate kcompat by copying the main VSI queue count -		 * into the pf, since this newer code pushes the pf queue -		 * info down a level into a VSI -		 */ -		pf->num_rx_queues = vsi->alloc_queue_pairs; -		pf->num_tx_queues = vsi->alloc_queue_pairs;  	} else {  		/* force a reset of TC and queue layout configurations */  		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; @@ -6685,6 +8133,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)  	}  	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); +	i40e_fdir_sb_setup(pf); +  	/* Setup static PF queue filter control settings */  	ret = i40e_setup_pf_filter_control(pf);  	if (ret) { @@ -6703,37 +8153,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)  	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);  	i40e_link_event(pf); -	/* Initialize user-specifics link properties */ +	/* Initialize user-specific link properties */  	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &  				  I40E_AQ_AN_COMPLETED) ? 
true : false); -	pf->hw.fc.requested_mode = I40E_FC_DEFAULT; -	if (pf->hw.phy.link_info.an_info & -	   (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX)) +	/* requested_mode is set in probe or by ethtool */ +	if (!pf->fc_autoneg_status) +		goto no_autoneg; + +	if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) && +	    (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))  		pf->hw.fc.current_mode = I40E_FC_FULL;  	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)  		pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;  	else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)  		pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;  	else -		pf->hw.fc.current_mode = I40E_FC_DEFAULT; +		pf->hw.fc.current_mode = I40E_FC_NONE; -	return ret; -} +	/* sync the flow control settings with the auto-neg values */ +	switch (pf->hw.fc.current_mode) { +	case I40E_FC_FULL: +		txfc = 1; +		rxfc = 1; +		break; +	case I40E_FC_TX_PAUSE: +		txfc = 1; +		rxfc = 0; +		break; +	case I40E_FC_RX_PAUSE: +		txfc = 0; +		rxfc = 1; +		break; +	case I40E_FC_NONE: +	case I40E_FC_DEFAULT: +		txfc = 0; +		rxfc = 0; +		break; +	case I40E_FC_PFC: +		/* TBD */ +		break; +	/* no default case, we have to handle all possibilities here */ +	} -/** - * i40e_set_rss_size - helper to set rss_size - * @pf: board private structure - * @queues_left: how many queues - */ -static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left) -{ -	int num_tc0; +	wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT); + +	rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) & +		   ~I40E_PRTDCB_MFLCN_RFCE_MASK; +	rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT); + +	wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg); + +	goto fc_complete; -	num_tc0 = min_t(int, queues_left, pf->rss_size_max); -	num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id())); -	num_tc0 = rounddown_pow_of_two(num_tc0); +no_autoneg: +	/* disable L2 flow control, user can turn it on if they wish */ +	wr32(&pf->hw, I40E_PRTDCB_FCCFG, 
0); +	wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) & +					 ~I40E_PRTDCB_MFLCN_RFCE_MASK); -	return num_tc0; +fc_complete: +	i40e_ptp_init(pf); + +	return ret;  }  /** @@ -6742,12 +8223,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)   **/  static void i40e_determine_queue_usage(struct i40e_pf *pf)  { -	int accum_tc_size;  	int queues_left;  	pf->num_lan_qps = 0; -	pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps); -	accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;  	/* Find the max queues to be put into basic use.  We'll always be  	 * using TC0, whether or not DCB is running, and TC0 will get the @@ -6755,99 +8233,56 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)  	 */  	queues_left = pf->hw.func_caps.num_tx_qp; -	if   (!((pf->flags & I40E_FLAG_MSIX_ENABLED)		 && -		(pf->flags & I40E_FLAG_MQ_ENABLED))		 || -		!(pf->flags & (I40E_FLAG_RSS_ENABLED | -		I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) || -		(queues_left == 1)) { - +	if ((queues_left == 1) || +	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {  		/* one qp for PF, no queues for anything else */  		queues_left = 0;  		pf->rss_size = pf->num_lan_qps = 1;  		/* make sure all the fancies are disabled */ -		pf->flags &= ~(I40E_FLAG_RSS_ENABLED       | -				I40E_FLAG_MQ_ENABLED	   | -				I40E_FLAG_FDIR_ENABLED	   | -				I40E_FLAG_FDIR_ATR_ENABLED | -				I40E_FLAG_DCB_ENABLED	   | -				I40E_FLAG_SRIOV_ENABLED	   | -				I40E_FLAG_VMDQ_ENABLED); - -	} else if (pf->flags & I40E_FLAG_RSS_ENABLED	  && -		   !(pf->flags & I40E_FLAG_FDIR_ENABLED)  && -		   !(pf->flags & I40E_FLAG_DCB_ENABLED)) { - -		pf->rss_size = i40e_set_rss_size(pf, queues_left); - -		queues_left -= pf->rss_size; -		pf->num_lan_qps = pf->rss_size; - -	} else if (pf->flags & I40E_FLAG_RSS_ENABLED	  && -		   !(pf->flags & I40E_FLAG_FDIR_ENABLED)  && -		   (pf->flags & I40E_FLAG_DCB_ENABLED)) { - -		/* save num_tc_qps queues for TCs 1 thru 7 and the rest -		 * are set up for 
RSS in TC0 -		 */ -		queues_left -= accum_tc_size; - -		pf->rss_size = i40e_set_rss_size(pf, queues_left); - -		queues_left -= pf->rss_size; -		if (queues_left < 0) { -			dev_info(&pf->pdev->dev, "not enough queues for DCB\n"); -			return; -		} - -		pf->num_lan_qps = pf->rss_size + accum_tc_size; - -	} else if (pf->flags & I40E_FLAG_RSS_ENABLED   && -		  (pf->flags & I40E_FLAG_FDIR_ENABLED) && -		  !(pf->flags & I40E_FLAG_DCB_ENABLED)) { - -		queues_left -= 1; /* save 1 queue for FD */ - -		pf->rss_size = i40e_set_rss_size(pf, queues_left); +		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	| +			       I40E_FLAG_FD_SB_ENABLED	| +			       I40E_FLAG_FD_ATR_ENABLED	| +			       I40E_FLAG_DCB_CAPABLE	| +			       I40E_FLAG_SRIOV_ENABLED	| +			       I40E_FLAG_VMDQ_ENABLED); +	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | +				  I40E_FLAG_FD_SB_ENABLED | +				  I40E_FLAG_FD_ATR_ENABLED | +				  I40E_FLAG_DCB_CAPABLE))) { +		/* one qp for PF */ +		pf->rss_size = pf->num_lan_qps = 1; +		queues_left -= pf->num_lan_qps; -		queues_left -= pf->rss_size; -		if (queues_left < 0) { -			dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n"); -			return; +		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	| +			       I40E_FLAG_FD_SB_ENABLED	| +			       I40E_FLAG_FD_ATR_ENABLED	| +			       I40E_FLAG_DCB_ENABLED	| +			       I40E_FLAG_VMDQ_ENABLED); +	} else { +		/* Not enough queues for all TCs */ +		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && +		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) { +			pf->flags &= ~I40E_FLAG_DCB_CAPABLE; +			dev_info(&pf->pdev->dev, "not enough queues for DCB. 
DCB is disabled.\n");  		} +		pf->num_lan_qps = pf->rss_size_max; +		queues_left -= pf->num_lan_qps; +	} -		pf->num_lan_qps = pf->rss_size; - -	} else if (pf->flags & I40E_FLAG_RSS_ENABLED   && -		  (pf->flags & I40E_FLAG_FDIR_ENABLED) && -		  (pf->flags & I40E_FLAG_DCB_ENABLED)) { - -		/* save 1 queue for TCs 1 thru 7, -		 * 1 queue for flow director, -		 * and the rest are set up for RSS in TC0 -		 */ -		queues_left -= 1; -		queues_left -= accum_tc_size; - -		pf->rss_size = i40e_set_rss_size(pf, queues_left); -		queues_left -= pf->rss_size; -		if (queues_left < 0) { -			dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n"); -			return; +	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { +		if (queues_left > 1) { +			queues_left -= 1; /* save 1 queue for FD */ +		} else { +			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; +			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");  		} - -		pf->num_lan_qps = pf->rss_size + accum_tc_size; - -	} else { -		dev_info(&pf->pdev->dev, -			 "Invalid configuration, flags=0x%08llx\n", pf->flags); -		return;  	}  	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&  	    pf->num_vf_qps && pf->num_req_vfs && queues_left) { -		pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left / -							       pf->num_vf_qps)); +		pf->num_req_vfs = min_t(int, pf->num_req_vfs, +					(queues_left / pf->num_vf_qps));  		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);  	} @@ -6858,7 +8293,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)  		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);  	} -	return; +	pf->queues_left = queues_left;  }  /** @@ -6879,7 +8314,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)  	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;  	/* Flow Director is enabled */ -	if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED)) +	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))  		settings->enable_fdir 
= true;  	/* Ethtype and MACVLAN filters enabled for PF */ @@ -6892,6 +8327,45 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)  	return 0;  } +#define INFO_STRING_LEN 255 +static void i40e_print_features(struct i40e_pf *pf) +{ +	struct i40e_hw *hw = &pf->hw; +	char *buf, *string; + +	string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); +	if (!string) { +		dev_err(&pf->pdev->dev, "Features string allocation failed\n"); +		return; +	} + +	buf = string; + +	buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id); +#ifdef CONFIG_PCI_IOV +	buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); +#endif +	buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, +		       pf->vsi[pf->lan_vsi]->num_queue_pairs); + +	if (pf->flags & I40E_FLAG_RSS_ENABLED) +		buf += sprintf(buf, "RSS "); +	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) +		buf += sprintf(buf, "FD_ATR "); +	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { +		buf += sprintf(buf, "FD_SB "); +		buf += sprintf(buf, "NTUPLE "); +	} +	if (pf->flags & I40E_FLAG_DCB_CAPABLE) +		buf += sprintf(buf, "DCB "); +	if (pf->flags & I40E_FLAG_PTP) +		buf += sprintf(buf, "PTP "); + +	BUG_ON(buf > (string + INFO_STRING_LEN)); +	dev_info(&pf->pdev->dev, "%s\n", string); +	kfree(string); +} +  /**   * i40e_probe - Device initialization routine   * @pdev: PCI device information struct @@ -6905,28 +8379,27 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)   **/  static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  { -	struct i40e_driver_version dv;  	struct i40e_pf *pf;  	struct i40e_hw *hw; +	static u16 pfs_found; +	u16 link_status;  	int err = 0;  	u32 len; +	u32 i;  	err = pci_enable_device_mem(pdev);  	if (err)  		return err;  	/* set up for high or low dma */ -	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { -		/* coherent mask for the same size will always succeed if -		 * dma_set_mask does -		 */ -		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); -	} else if 
(!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { -		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); -	} else { -		dev_err(&pdev->dev, "DMA configuration failed: %d\n", err); -		err = -EIO; -		goto err_dma; +	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); +	if (err) { +		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); +		if (err) { +			dev_err(&pdev->dev, +				"DMA configuration failed: 0x%x\n", err); +			goto err_dma; +		}  	}  	/* set up pci connections */ @@ -6973,6 +8446,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	hw->subsystem_device_id = pdev->subsystem_device;  	hw->bus.device = PCI_SLOT(pdev->devfn);  	hw->bus.func = PCI_FUNC(pdev->devfn); +	pf->instance = pfs_found; + +	/* do a special CORER for clearing PXE mode once at init */ +	if (hw->revision_id == 0 && +	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { +		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); +		i40e_flush(hw); +		msleep(200); +		pf->corer_count++; + +		i40e_clear_pxe_mode(hw); +	}  	/* Reset here to make sure all is clean and to define PF 'n' */  	err = i40e_pf_reset(hw); @@ -6997,6 +8482,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		goto err_pf_reset;  	} +	/* set up a default setting for link flow control */ +	pf->hw.fc.requested_mode = I40E_FC_NONE; +  	err = i40e_init_adminq(hw);  	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));  	if (err) { @@ -7007,6 +8495,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		goto err_pf_reset;  	} +	i40e_verify_eeprom(pf); + +	/* Rev 0 hardware was never productized */ +	if (hw->revision_id < 1) +		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. 
If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); + +	i40e_clear_pxe_mode(hw);  	err = i40e_get_capabilities(pf);  	if (err)  		goto err_adminq_setup; @@ -7033,16 +8528,24 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	}  	i40e_get_mac_addr(hw, hw->mac.addr); -	if (i40e_validate_mac_addr(hw->mac.addr)) { +	if (!is_valid_ether_addr(hw->mac.addr)) {  		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);  		err = -EIO;  		goto err_mac_addr;  	}  	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); -	memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN); +	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);  	pci_set_drvdata(pdev, pf);  	pci_save_state(pdev); +#ifdef CONFIG_I40E_DCB +	err = i40e_init_pf_dcb(pf); +	if (err) { +		dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); +		pf->flags &= ~I40E_FLAG_DCB_CAPABLE; +		/* Continue without DCB enabled */ +	} +#endif /* CONFIG_I40E_DCB */  	/* set up periodic task facility */  	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); @@ -7053,23 +8556,44 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;  	pf->link_check_timeout = jiffies; +	/* WoL defaults to disabled */ +	pf->wol_en = false; +	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); +  	/* set up the main switch operations */  	i40e_determine_queue_usage(pf);  	i40e_init_interrupt_scheme(pf); -	/* Set up the *vsi struct based on the number of VSIs in the HW, -	 * and set up our local tracking of the MAIN PF vsi. +	/* The number of VSIs reported by the FW is the minimum guaranteed +	 * to us; HW supports far more and we share the remaining pool with +	 * the other PFs. We allocate space for more than the guarantee with +	 * the understanding that we might not get them all later.  	 
*/ -	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis; +	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) +		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; +	else +		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; + +	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ +	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;  	pf->vsi = kzalloc(len, GFP_KERNEL); -	if (!pf->vsi) +	if (!pf->vsi) { +		err = -ENOMEM;  		goto err_switch_setup; +	} -	err = i40e_setup_pf_switch(pf); +	err = i40e_setup_pf_switch(pf, false);  	if (err) {  		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);  		goto err_vsis;  	} +	/* if FDIR VSI was set up, start it now */ +	for (i = 0; i < pf->num_alloc_vsi; i++) { +		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { +			i40e_vsi_open(pf->vsi[i]); +			break; +		} +	}  	/* The main driver is (mostly) up and happy. We need to set this state  	 * before setting up the misc vector or we get a race and the vector @@ -7091,9 +8615,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		}  	} +#ifdef CONFIG_PCI_IOV  	/* prep for VF support */  	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && -	    (pf->flags & I40E_FLAG_MSIX_ENABLED)) { +	    (pf->flags & I40E_FLAG_MSIX_ENABLED) && +	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {  		u32 val;  		/* disable link interrupts for VFs */ @@ -7101,29 +8627,64 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;  		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);  		i40e_flush(hw); + +		if (pci_num_vf(pdev)) { +			dev_info(&pdev->dev, +				 "Active VFs found, allocating resources.\n"); +			err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); +			if (err) +				dev_info(&pdev->dev, +					 "Error %d allocating resources for existing VFs\n", +					 err); +		}  	} +#endif /* CONFIG_PCI_IOV */ + +	pfs_found++;  	i40e_dbg_pf_init(pf);  	/* tell the firmware that we're starting */ -	
dv.major_version = DRV_VERSION_MAJOR; -	dv.minor_version = DRV_VERSION_MINOR; -	dv.build_version = DRV_VERSION_BUILD; -	dv.subbuild_version = 0; -	i40e_aq_send_driver_version(&pf->hw, &dv, NULL); +	i40e_send_version(pf);  	/* since everything's happy, start the service_task timer */  	mod_timer(&pf->service_timer,  		  round_jiffies(jiffies + pf->service_timer_period)); +	/* Get the negotiated link width and speed from PCI config space */ +	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); + +	i40e_set_pci_config_data(hw, link_status); + +	dev_info(&pdev->dev, "PCI-Express: %s %s\n", +		(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : +		 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : +		 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : +		 "Unknown"), +		(hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" : +		 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" : +		 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" : +		 hw->bus.width == i40e_bus_width_pcie_x1 ? 
"Width x1" : +		 "Unknown")); + +	if (hw->bus.width < i40e_bus_width_pcie_x8 || +	    hw->bus.speed < i40e_bus_speed_8000) { +		dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); +		dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); +	} + +	/* print a string summarizing features */ +	i40e_print_features(pf); +  	return 0;  	/* Unwind what we've done if something failed in the setup */  err_vsis:  	set_bit(__I40E_DOWN, &pf->state); -err_switch_setup:  	i40e_clear_interrupt_scheme(pf);  	kfree(pf->vsi); +err_switch_setup: +	i40e_reset_interrupt_capability(pf);  	del_timer_sync(&pf->service_timer);  err_mac_addr:  err_configure_lan_hmc: @@ -7166,16 +8727,18 @@ static void i40e_remove(struct pci_dev *pdev)  	i40e_dbg_pf_exit(pf); -	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { -		i40e_free_vfs(pf); -		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; -	} +	i40e_ptp_stop(pf);  	/* no more scheduling of any task */  	set_bit(__I40E_DOWN, &pf->state);  	del_timer_sync(&pf->service_timer);  	cancel_work_sync(&pf->service_task); +	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { +		i40e_free_vfs(pf); +		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; +	} +  	i40e_fdir_teardown(pf);  	/* If there is a switch structure or any orphans, remove them. 
@@ -7203,13 +8766,15 @@ static void i40e_remove(struct pci_dev *pdev)  	}  	/* shutdown and destroy the HMC */ -	ret_code = i40e_shutdown_lan_hmc(&pf->hw); -	if (ret_code) -		dev_warn(&pdev->dev, -			 "Failed to destroy the HMC resources: %d\n", ret_code); +	if (pf->hw.hmc.hmc_obj) { +		ret_code = i40e_shutdown_lan_hmc(&pf->hw); +		if (ret_code) +			dev_warn(&pdev->dev, +				 "Failed to destroy the HMC resources: %d\n", +				 ret_code); +	}  	/* shutdown the adminq */ -	i40e_aq_queue_shutdown(&pf->hw, true);  	ret_code = i40e_shutdown_adminq(&pf->hw);  	if (ret_code)  		dev_warn(&pdev->dev, @@ -7218,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)  	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */  	i40e_clear_interrupt_scheme(pf); -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { +	for (i = 0; i < pf->num_alloc_vsi; i++) {  		if (pf->vsi[i]) {  			i40e_vsi_clear_rings(pf->vsi[i]);  			i40e_vsi_clear(pf->vsi[i]); @@ -7233,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)  	kfree(pf->qp_pile);  	kfree(pf->irq_pile); -	kfree(pf->sw_config);  	kfree(pf->vsi);  	/* force a PF reset to clean anything leftover */ @@ -7266,7 +8830,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,  	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);  	/* shutdown all operations */ -	i40e_pf_quiesce_all_vsi(pf); +	if (!test_bit(__I40E_SUSPENDED, &pf->state)) { +		rtnl_lock(); +		i40e_prep_for_reset(pf); +		rtnl_unlock(); +	}  	/* Request a slot reset */  	return PCI_ERS_RESULT_NEED_RESET; @@ -7329,9 +8897,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)  	struct i40e_pf *pf = pci_get_drvdata(pdev);  	dev_info(&pdev->dev, "%s\n", __func__); +	if (test_bit(__I40E_SUSPENDED, &pf->state)) +		return; + +	rtnl_lock();  	i40e_handle_reset_warning(pf); +	rtnl_unlock(); +} + +/** + * i40e_shutdown - PCI callback for shutting down + * @pdev: PCI device information struct + **/ +static void i40e_shutdown(struct pci_dev 
*pdev) +{ +	struct i40e_pf *pf = pci_get_drvdata(pdev); +	struct i40e_hw *hw = &pf->hw; + +	set_bit(__I40E_SUSPENDED, &pf->state); +	set_bit(__I40E_DOWN, &pf->state); +	rtnl_lock(); +	i40e_prep_for_reset(pf); +	rtnl_unlock(); + +	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); +	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + +	if (system_state == SYSTEM_POWER_OFF) { +		pci_wake_from_d3(pdev, pf->wol_en); +		pci_set_power_state(pdev, PCI_D3hot); +	} +} + +#ifdef CONFIG_PM +/** + * i40e_suspend - PCI callback for moving to D3 + * @pdev: PCI device information struct + **/ +static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) +{ +	struct i40e_pf *pf = pci_get_drvdata(pdev); +	struct i40e_hw *hw = &pf->hw; + +	set_bit(__I40E_SUSPENDED, &pf->state); +	set_bit(__I40E_DOWN, &pf->state); +	rtnl_lock(); +	i40e_prep_for_reset(pf); +	rtnl_unlock(); + +	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); +	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + +	pci_wake_from_d3(pdev, pf->wol_en); +	pci_set_power_state(pdev, PCI_D3hot); + +	return 0;  } +/** + * i40e_resume - PCI callback for waking up from D3 + * @pdev: PCI device information struct + **/ +static int i40e_resume(struct pci_dev *pdev) +{ +	struct i40e_pf *pf = pci_get_drvdata(pdev); +	u32 err; + +	pci_set_power_state(pdev, PCI_D0); +	pci_restore_state(pdev); +	/* pci_restore_state() clears dev->state_saves, so +	 * call pci_save_state() again to restore it. 
+	 */ +	pci_save_state(pdev); + +	err = pci_enable_device_mem(pdev); +	if (err) { +		dev_err(&pdev->dev, +			"%s: Cannot enable PCI device from suspend\n", +			__func__); +		return err; +	} +	pci_set_master(pdev); + +	/* no wakeup events while running */ +	pci_wake_from_d3(pdev, false); + +	/* handling the reset will rebuild the device state */ +	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { +		clear_bit(__I40E_DOWN, &pf->state); +		rtnl_lock(); +		i40e_reset_and_rebuild(pf, false); +		rtnl_unlock(); +	} + +	return 0; +} + +#endif  static const struct pci_error_handlers i40e_err_handler = {  	.error_detected = i40e_pci_error_detected,  	.slot_reset = i40e_pci_error_slot_reset, @@ -7343,6 +9005,11 @@ static struct pci_driver i40e_driver = {  	.id_table = i40e_pci_tbl,  	.probe    = i40e_probe,  	.remove   = i40e_remove, +#ifdef CONFIG_PM +	.suspend  = i40e_suspend, +	.resume   = i40e_resume, +#endif +	.shutdown = i40e_shutdown,  	.err_handler = &i40e_err_handler,  	.sriov_configure = i40e_pci_sriov_configure,  }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 97e1bb30ef8..81299189a47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -28,14 +27,14 @@  #include "i40e_prototype.h"  /** - *  i40e_init_nvm_ops - Initialize NVM function pointers. - *  @hw: pointer to the HW structure. + * i40e_init_nvm_ops - Initialize NVM function pointers + * @hw: pointer to the HW structure   * - *  Setups the function pointers and the NVM info structure. Should be called - *  once per NVM initialization, e.g. inside the i40e_init_shared_code(). - *  Please notice that the NVM term is used here (& in all methods covered - *  in this file) as an equivalent of the FLASH part mapped into the SR. - *  We are accessing FLASH always thru the Shadow RAM. + * Setup the function pointers and the NVM info structure. Should be called + * once per NVM initialization, e.g. inside the i40e_init_shared_code(). + * Please notice that the NVM term is used here (& in all methods covered + * in this file) as an equivalent of the FLASH part mapped into the SR. + * We are accessing FLASH always thru the Shadow RAM.   **/  i40e_status i40e_init_nvm(struct i40e_hw *hw)  { @@ -50,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)  	gens = rd32(hw, I40E_GLNVM_GENS);  	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>  			   I40E_GLNVM_GENS_SR_SIZE_SHIFT); -	/* Switching to words (sr_size contains power of 2KB). */ +	/* Switching to words (sr_size contains power of 2KB) */  	nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; -	/* Check if we are in the normal or blank NVM programming mode. 
*/ +	/* Check if we are in the normal or blank NVM programming mode */  	fla = rd32(hw, I40E_GLNVM_FLA); -	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */ -		/* Max NVM timeout. */ +	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */ +		/* Max NVM timeout */  		nvm->timeout = I40E_MAX_NVM_TIMEOUT;  		nvm->blank_nvm_mode = false; -	} else { /* Blank programming mode. */ +	} else { /* Blank programming mode */  		nvm->blank_nvm_mode = true;  		ret_code = I40E_ERR_NVM_BLANK_MODE;  		hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); @@ -69,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)  }  /** - *  i40e_acquire_nvm - Generic request for acquiring the NVM ownership. - *  @hw: pointer to the HW structure. - *  @access: NVM access type (read or write). + * i40e_acquire_nvm - Generic request for acquiring the NVM ownership + * @hw: pointer to the HW structure + * @access: NVM access type (read or write)   * - *  This function will request NVM ownership for reading - *  via the proper Admin Command. + * This function will request NVM ownership for reading + * via the proper Admin Command.   **/  i40e_status i40e_acquire_nvm(struct i40e_hw *hw,  				       enum i40e_aq_resource_access_type access) @@ -88,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,  	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,  					    0, &time, NULL); -	/* Reading the Global Device Timer. */ +	/* Reading the Global Device Timer */  	gtime = rd32(hw, I40E_GLVFGEN_TIMER); -	/* Store the timeout. */ +	/* Store the timeout */  	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;  	if (ret_code) { -		/* Set the polling timeout. */ +		/* Set the polling timeout */  		if (time > I40E_MAX_NVM_TIMEOUT)  			timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)  				  + gtime;  		else  			timeout = hw->nvm.hw_semaphore_timeout; -		/* Poll until the current NVM owner timeouts. 
*/ +		/* Poll until the current NVM owner timeouts */  		while (gtime < timeout) {  			usleep_range(10000, 20000);  			ret_code = i40e_aq_request_resource(hw, @@ -129,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:  }  /** - *  i40e_release_nvm - Generic request for releasing the NVM ownership. - *  @hw: pointer to the HW structure. + * i40e_release_nvm - Generic request for releasing the NVM ownership + * @hw: pointer to the HW structure   * - *  This function will release NVM resource via the proper Admin Command. + * This function will release NVM resource via the proper Admin Command.   **/  void i40e_release_nvm(struct i40e_hw *hw)  { @@ -141,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)  }  /** - *  i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit. - *  @hw: pointer to the HW structure. + * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit + * @hw: pointer to the HW structure   * - *  Polls the SRCTL Shadow RAM register done bit. + * Polls the SRCTL Shadow RAM register done bit.   **/  static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)  {  	i40e_status ret_code = I40E_ERR_TIMEOUT;  	u32 srctl, wait_cnt; -	/* Poll the I40E_GLNVM_SRCTL until the done bit is set. */ +	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */  	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {  		srctl = rd32(hw, I40E_GLNVM_SRCTL);  		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { @@ -161,20 +160,20 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)  		udelay(5);  	}  	if (ret_code == I40E_ERR_TIMEOUT) -		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); +		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");  	return ret_code;  }  /** - *  i40e_read_nvm_srctl - Reads Shadow RAM. - *  @hw: pointer to the HW structure. - *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - *  @data: word read from the Shadow RAM. 
+ * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM   * - *  Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.   **/ -static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset, -						 u16 *data) +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, +					 u16 *data)  {  	i40e_status ret_code = I40E_ERR_TIMEOUT;  	u32 sr_reg; @@ -185,15 +184,15 @@ static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,  		goto read_nvm_exit;  	} -	/* Poll the done bit first. */ +	/* Poll the done bit first */  	ret_code = i40e_poll_sr_srctl_done_bit(hw);  	if (!ret_code) { -		/* Write the address and start reading. */ +		/* Write the address and start reading */  		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |  			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);  		wr32(hw, I40E_GLNVM_SRCTL, sr_reg); -		/* Poll I40E_GLNVM_SRCTL until the done bit is set. */ +		/* Poll I40E_GLNVM_SRCTL until the done bit is set */  		ret_code = i40e_poll_sr_srctl_done_bit(hw);  		if (!ret_code) {  			sr_reg = rd32(hw, I40E_GLNVM_SRDATA); @@ -211,80 +210,45 @@ read_nvm_exit:  }  /** - *  i40e_read_nvm_word - Reads Shadow RAM word. - *  @hw: pointer to the HW structure. - *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - *  @data: word read from the Shadow RAM. - * - *  Reads 16 bit word from the Shadow RAM. Each read is preceded - *  with the NVM ownership taking and followed by the release. 
- **/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, -					 u16 *data) -{ -	i40e_status ret_code = 0; - -	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); -	if (!ret_code) { -		ret_code = i40e_read_nvm_srctl(hw, offset, data); -		i40e_release_nvm(hw); -	} - -	return ret_code; -} - -/** - *  i40e_read_nvm_buffer - Reads Shadow RAM buffer. - *  @hw: pointer to the HW structure. - *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - *  @words: number of words to read (in) & - *          number of words read before the NVM ownership timeout (out). - *  @data: words read from the Shadow RAM. + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM   * - *  Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - *  method. The buffer read is preceded by the NVM ownership take - *  and followed by the release. + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release.   **/  i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,  					   u16 *words, u16 *data)  {  	i40e_status ret_code = 0;  	u16 index, word; -	u32 time; -	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); -	if (!ret_code) { -		/* Loop thru the selected region. */ -		for (word = 0; word < *words; word++) { -			index = offset + word; -			ret_code = i40e_read_nvm_srctl(hw, index, &data[word]); -			if (ret_code) -				break; -			/* Check if we didn't exceeded the semaphore timeout. 
*/ -			time = rd32(hw, I40E_GLVFGEN_TIMER); -			if (time >= hw->nvm.hw_semaphore_timeout) { -				ret_code = I40E_ERR_TIMEOUT; -				hw_dbg(hw, "NVM read error: timeout.\n"); -				break; -			} -		} -		/* Update the number of words read from the Shadow RAM. */ -		*words = word; -		/* Release the NVM ownership. */ -		i40e_release_nvm(hw); +	/* Loop thru the selected region */ +	for (word = 0; word < *words; word++) { +		index = offset + word; +		ret_code = i40e_read_nvm_word(hw, index, &data[word]); +		if (ret_code) +			break;  	} +	/* Update the number of words read from the Shadow RAM */ +	*words = word; +  	return ret_code;  }  /** - *  i40e_calc_nvm_checksum - Calculates and returns the checksum - *  @hw: pointer to hardware structure + * i40e_calc_nvm_checksum - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @checksum: pointer to the checksum   * - *  This function calculate SW Checksum that covers the whole 64kB shadow RAM - *  except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD - *  is customer specific and unknown. Therefore, this function skips all maximum - *  possible size of VPD (1kB). + * This function calculates SW Checksum that covers the whole 64kB shadow RAM + * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD + * is customer specific and unknown. Therefore, this function skips all maximum + * possible size of VPD (1kB).   
**/  static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,  						    u16 *checksum) @@ -297,14 +261,14 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,  	u32 i = 0;  	/* read pointer to VPD area */ -	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module); +	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);  	if (ret_code) {  		ret_code = I40E_ERR_NVM_CHECKSUM;  		goto i40e_calc_nvm_checksum_exit;  	}  	/* read pointer to PCIe Alt Auto-load module */ -	ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, +	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,  				       &pcie_alt_module);  	if (ret_code) {  		ret_code = I40E_ERR_NVM_CHECKSUM; @@ -331,7 +295,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,  				break;  		} -		ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word); +		ret_code = i40e_read_nvm_word(hw, (u16)i, &word);  		if (ret_code) {  			ret_code = I40E_ERR_NVM_CHECKSUM;  			goto i40e_calc_nvm_checksum_exit; @@ -346,19 +310,19 @@ i40e_calc_nvm_checksum_exit:  }  /** - *  i40e_validate_nvm_checksum - Validate EEPROM checksum - *  @hw: pointer to hardware structure - *  @checksum: calculated checksum + * i40e_validate_nvm_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum: calculated checksum   * - *  Performs checksum calculation and validates the NVM SW checksum. If the - *  caller does not need checksum, the value can be NULL. + * Performs checksum calculation and validates the NVM SW checksum. If the + * caller does not need checksum, the value can be NULL.   
**/  i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,  						 u16 *checksum)  {  	i40e_status ret_code = 0;  	u16 checksum_sr = 0; -	u16 checksum_local; +	u16 checksum_local = 0;  	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);  	if (ret_code) @@ -371,7 +335,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,  	/* Do not use i40e_read_nvm_word() because we do not want to take  	 * the synchronization semaphores twice here.  	 */ -	i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); +	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);  	/* Verify read checksum from EEPROM is the same as  	 * calculated checksum diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 702c81ba86e..ecd0f0b663c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index f75bb9ccc90..a430699c41d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -51,7 +50,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,  				void *buff, /* can be NULL */  				u16  buff_size,  				struct i40e_asq_cmd_details *cmd_details); -bool i40e_asq_done(struct i40e_hw *hw);  /* debug function for adminq */  void i40e_debug_aq(struct i40e_hw *hw, @@ -60,10 +58,11 @@ void i40e_debug_aq(struct i40e_hw *hw,  		   void *buffer);  void i40e_idle_aq(struct i40e_hw *hw); -void i40e_resume_aq(struct i40e_hw *hw); +bool i40e_check_asq_alive(struct i40e_hw *hw); +i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);  u32 i40e_led_get(struct i40e_hw *hw); -void i40e_led_set(struct i40e_hw *hw, u32 mode); +void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);  /* admin send queue commands */ @@ -71,12 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,  				u16 *fw_major_version, u16 *fw_minor_version,  				u16 *api_major_version, u16 *api_minor_version,  				struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, -					     bool unloading); -i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw, +i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,  				struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, +				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, @@ -95,9 +94,9 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,  				u16 vsi_id, bool set_filter,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, -				u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);  
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, -				u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,  				struct i40e_vsi_context *vsi_ctx,  				struct i40e_asq_cmd_details *cmd_details); @@ -106,7 +105,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,  				u16 downlink_seid, u8 enabled_tc, -				bool default_port, u16 *pveb_seid, +				bool default_port, bool enable_l2_filtering, +				u16 *pveb_seid,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,  				u16 veb_seid, u16 *switch_id, bool *floating, @@ -119,12 +119,6 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,  i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,  			struct i40e_aqc_remove_macvlan_element_data *mv_list,  			u16 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id, -			struct i40e_aqc_add_remove_vlan_element_data *v_list, -			u8 count, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id, -			struct i40e_aqc_add_remove_vlan_element_data *v_list, -			u8 count, struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,  				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,  				struct i40e_asq_cmd_details *cmd_details); @@ -164,11 +158,22 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, +				u16 udp_port, u8 protocol_index, +				u8 
*filter_index, +				struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, +				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,  				    u16 flags, u8 *mac_addr,  				    struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, +				u16 seid, u16 credit, u8 max_credit, +				struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, +				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,  				enum i40e_aq_hmc_profile profile,  				u8 pe_vf_enabled_count, @@ -179,6 +184,15 @@ i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,  i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,  			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,  			struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, +		u16 seid, +		struct i40e_aqc_configure_switching_comp_ets_data *ets_data, +		enum i40e_admin_queue_opc opcode, +		struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, +	u16 seid, +	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, +	struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,  			u16 seid,  			struct i40e_aqc_query_vsi_bw_config_resp *bw_data, @@ -207,8 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);  i40e_status i40e_get_mac_addr(struct i40e_hw *hw,  						u8 *mac_addr);  i40e_status i40e_validate_mac_addr(u8 *mac_addr); -i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, -					struct i40e_lldp_variables *lldp_cfg); +void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);  /* 
prototype for functions used for NVM access */  i40e_status i40e_init_nvm(struct i40e_hw *hw);  i40e_status i40e_acquire_nvm(struct i40e_hw *hw, @@ -222,6 +235,14 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,  					   u16 *words, u16 *data);  i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,  						 u16 *checksum); +void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); + +extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ +	return i40e_ptype_lookup[ptype]; +}  /* prototype for functions used for SW locks */ @@ -236,4 +257,9 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,  				struct i40e_asq_cmd_details *cmd_details);  i40e_status i40e_set_filter_control(struct i40e_hw *hw,  				struct i40e_filter_control_settings *settings); +i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, +				u8 *mac_addr, u16 ethtype, u16 flags, +				u16 vsi_seid, u16 queue, bool is_add, +				struct i40e_control_filter_stats *stats, +				struct i40e_asq_cmd_details *cmd_details);  #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c new file mode 100644 index 00000000000..101f439acda --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -0,0 +1,625 @@ +/******************************************************************************* + * + * Intel Ethernet Controller XL710 Family Linux Driver + * Copyright(c) 2013 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#include "i40e.h" +#include <linux/export.h> +#include <linux/ptp_classify.h> + +/* The XL710 timesync is very much like Intel's 82599 design when it comes to + * the fundamental clock design. However, the clock operations are much simpler + * in the XL710 because the device supports a full 64 bits of nanoseconds. + * Because the field is so wide, we can forgo the cycle counter and just + * operate with the nanosecond field directly without fear of overflow. + * + * Much like the 82599, the update period is dependent upon the link speed: + * At 40Gb link or no link, the period is 1.6ns. + * At 10Gb link, the period is multiplied by 2. (3.2ns) + * At 1Gb link, the period is multiplied by 20. (32ns) + * 1588 functionality is not supported at 100Mbps. + */ +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL 0x0333333333ULL +#define I40E_PTP_1GB_INCVAL  0x2000000000ULL + +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \ +					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \ +					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) + +/** + * i40e_ptp_read - Read the PHC time from the device + * @pf: Board private structure + * @ts: timespec structure to hold the current time value + * + * This function reads the PRTTSYN_TIME registers and stores them in a + * timespec. 
However, since the registers are 64 bits of nanoseconds, we must + * convert the result to a timespec before we can return. + **/ +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) +{ +	struct i40e_hw *hw = &pf->hw; +	u32 hi, lo; +	u64 ns; + +	/* The timer latches on the lowest register read. */ +	lo = rd32(hw, I40E_PRTTSYN_TIME_L); +	hi = rd32(hw, I40E_PRTTSYN_TIME_H); + +	ns = (((u64)hi) << 32) | lo; + +	*ts = ns_to_timespec(ns); +} + +/** + * i40e_ptp_write - Write the PHC time to the device + * @pf: Board private structure + * @ts: timespec structure that holds the new time value + * + * This function writes the PRTTSYN_TIME registers with the user value. Since + * we receive a timespec from the stack, we must convert that timespec into + * nanoseconds before programming the registers. + **/ +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts) +{ +	struct i40e_hw *hw = &pf->hw; +	u64 ns = timespec_to_ns(ts); + +	/* The timer will not update until the high register is written, so +	 * write the low register first. +	 */ +	wr32(hw, I40E_PRTTSYN_TIME_L, ns & 0xFFFFFFFF); +	wr32(hw, I40E_PRTTSYN_TIME_H, ns >> 32); +} + +/** + * i40e_ptp_convert_to_hwtstamp - Convert device clock to system time + * @hwtstamps: Timestamp structure to update + * @timestamp: Timestamp from the hardware + * + * We need to convert the NIC clock value into a hwtstamp which can be used by + * the upper level timestamping functions. Since the timestamp is simply a 64- + * bit nanosecond value, we can call ns_to_ktime directly to handle this. 
+ **/ +static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps, +					 u64 timestamp) +{ +	memset(hwtstamps, 0, sizeof(*hwtstamps)); + +	hwtstamps->hwtstamp = ns_to_ktime(timestamp); +} + +/** + * i40e_ptp_adjfreq - Adjust the PHC frequency + * @ptp: The PTP clock structure + * @ppb: Parts per billion adjustment from the base + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. + **/ +static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ +	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); +	struct i40e_hw *hw = &pf->hw; +	u64 adj, freq, diff; +	int neg_adj = 0; + +	if (ppb < 0) { +		neg_adj = 1; +		ppb = -ppb; +	} + +	smp_mb(); /* Force any pending update before accessing. */ +	adj = ACCESS_ONCE(pf->ptp_base_adj); + +	freq = adj; +	freq *= ppb; +	diff = div_u64(freq, 1000000000ULL); + +	if (neg_adj) +		adj -= diff; +	else +		adj += diff; + +	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); +	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); + +	return 0; +} + +/** + * i40e_ptp_adjtime - Adjust the PHC time + * @ptp: The PTP clock structure + * @delta: Offset in nanoseconds to adjust the PHC time by + * + * Adjust the frequency of the PHC by the indicated parts per billion from the + * base frequency. 
+ **/ +static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ +	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); +	struct timespec now, then = ns_to_timespec(delta); +	unsigned long flags; + +	spin_lock_irqsave(&pf->tmreg_lock, flags); + +	i40e_ptp_read(pf, &now); +	now = timespec_add(now, then); +	i40e_ptp_write(pf, (const struct timespec *)&now); + +	spin_unlock_irqrestore(&pf->tmreg_lock, flags); + +	return 0; +} + +/** + * i40e_ptp_gettime - Get the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure to hold the current time value + * + * Read the device clock and return the correct value on ns, after converting it + * into a timespec struct. + **/ +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ +	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); +	unsigned long flags; + +	spin_lock_irqsave(&pf->tmreg_lock, flags); +	i40e_ptp_read(pf, ts); +	spin_unlock_irqrestore(&pf->tmreg_lock, flags); + +	return 0; +} + +/** + * i40e_ptp_settime - Set the time of the PHC + * @ptp: The PTP clock structure + * @ts: timespec structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + **/ +static int i40e_ptp_settime(struct ptp_clock_info *ptp, +			    const struct timespec *ts) +{ +	struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); +	unsigned long flags; + +	spin_lock_irqsave(&pf->tmreg_lock, flags); +	i40e_ptp_write(pf, ts); +	spin_unlock_irqrestore(&pf->tmreg_lock, flags); + +	return 0; +} + +/** + * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem + * @ptp: The PTP clock structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * The XL710 does not support any of the ancillary features of the PHC + * subsystem, so this function may just return. 
+ **/ +static int i40e_ptp_enable(struct ptp_clock_info *ptp, +			   struct ptp_clock_request *rq, int on) +{ +	return -EOPNOTSUPP; +} + +/** + * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung + * @vsi: The VSI with the rings relevant to 1588 + * + * This watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. The + * particular error is rare but leaves the device in a state unable to timestamp + * any future packets. + **/ +void i40e_ptp_rx_hang(struct i40e_vsi *vsi) +{ +	struct i40e_pf *pf = vsi->back; +	struct i40e_hw *hw = &pf->hw; +	struct i40e_ring *rx_ring; +	unsigned long rx_event; +	u32 prttsyn_stat; +	int n; + +	if (pf->flags & I40E_FLAG_PTP) +		return; + +	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + +	/* Unless all four receive timestamp registers are latched, we are not +	 * concerned about a possible PTP Rx hang, so just update the timeout +	 * counter and exit. +	 */ +	if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK << +			       I40E_PRTTSYN_STAT_1_RXT0_SHIFT) | +			      (I40E_PRTTSYN_STAT_1_RXT1_MASK << +			       I40E_PRTTSYN_STAT_1_RXT1_SHIFT) | +			      (I40E_PRTTSYN_STAT_1_RXT2_MASK << +			       I40E_PRTTSYN_STAT_1_RXT2_SHIFT) | +			      (I40E_PRTTSYN_STAT_1_RXT3_MASK << +			       I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) { +		pf->last_rx_ptp_check = jiffies; +		return; +	} + +	/* Determine the most recent watchdog or rx_timestamp event. 
*/ +	rx_event = pf->last_rx_ptp_check; +	for (n = 0; n < vsi->num_queue_pairs; n++) { +		rx_ring = vsi->rx_rings[n]; +		if (time_after(rx_ring->last_rx_timestamp, rx_event)) +			rx_event = rx_ring->last_rx_timestamp; +	} + +	/* Only need to read the high RXSTMP register to clear the lock */ +	if (time_is_before_jiffies(rx_event + 5 * HZ)) { +		rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); +		rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); +		rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); +		rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); +		pf->last_rx_ptp_check = jiffies; +		pf->rx_hwtstamp_cleared++; +		dev_warn(&vsi->back->pdev->dev, +			 "%s: clearing Rx timestamp hang\n", +			 __func__); +	} +} + +/** + * i40e_ptp_tx_hwtstamp - Utility function which returns the Tx timestamp + * @pf: Board private structure + * + * Read the value of the Tx timestamp from the registers, convert it into a + * value consumable by the stack, and store that result into the shhwtstamps + * struct before returning it up the stack. + **/ +void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) +{ +	struct skb_shared_hwtstamps shhwtstamps; +	struct i40e_hw *hw = &pf->hw; +	u32 hi, lo; +	u64 ns; + +	lo = rd32(hw, I40E_PRTTSYN_TXTIME_L); +	hi = rd32(hw, I40E_PRTTSYN_TXTIME_H); + +	ns = (((u64)hi) << 32) | lo; + +	i40e_ptp_convert_to_hwtstamp(&shhwtstamps, ns); +	skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); +	dev_kfree_skb_any(pf->ptp_tx_skb); +	pf->ptp_tx_skb = NULL; +} + +/** + * i40e_ptp_rx_hwtstamp - Utility function which checks for an Rx timestamp + * @pf: Board private structure + * @skb: Particular skb to send timestamp with + * @index: Index into the receive timestamp registers for the timestamp + * + * The XL710 receives a notification in the receive descriptor with an offset + * into the set of RXTIME registers where the timestamp is for that skb. This + * function goes and fetches the receive timestamp from that offset, if a valid + * one exists. The RXTIME registers are in ns, so we must convert the result + * first. 
+ **/ +void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) +{ +	u32 prttsyn_stat, hi, lo; +	struct i40e_hw *hw; +	u64 ns; + +	/* Since we cannot turn off the Rx timestamp logic if the device is +	 * doing Tx timestamping, check if Rx timestamping is configured. +	 */ +	if (!pf->ptp_rx) +		return; + +	hw = &pf->hw; + +	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); + +	if (!(prttsyn_stat & (1 << index))) +		return; + +	lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); +	hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); + +	ns = (((u64)hi) << 32) | lo; + +	i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); +} + +/** + * i40e_ptp_set_increment - Utility function to update clock increment rate + * @pf: Board private structure + * + * During a link change, the DMA frequency that drives the 1588 logic will + * change. In order to keep the PRTTSYN_TIME registers in units of nanoseconds, + * we must update the increment value per clock tick. + **/ +void i40e_ptp_set_increment(struct i40e_pf *pf) +{ +	struct i40e_link_status *hw_link_info; +	struct i40e_hw *hw = &pf->hw; +	u64 incval; + +	hw_link_info = &hw->phy.link_info; + +	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); + +	switch (hw_link_info->link_speed) { +	case I40E_LINK_SPEED_10GB: +		incval = I40E_PTP_10GB_INCVAL; +		break; +	case I40E_LINK_SPEED_1GB: +		incval = I40E_PTP_1GB_INCVAL; +		break; +	case I40E_LINK_SPEED_100MB: +		dev_warn(&pf->pdev->dev, +			 "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n", +			 __func__); +		incval = 0; +		break; +	case I40E_LINK_SPEED_40GB: +	default: +		incval = I40E_PTP_40GB_INCVAL; +		break; +	} + +	/* Write the new increment value into the increment register. The +	 * hardware will not update the clock until both registers have been +	 * written. +	 */ +	wr32(hw, I40E_PRTTSYN_INC_L, incval & 0xFFFFFFFF); +	wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); + +	/* Update the base adjustement value. 
*/ +	ACCESS_ONCE(pf->ptp_base_adj) = incval; +	smp_mb(); /* Force the above update. */ +} + +/** + * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Obtain the current hardware timestamping settigs as requested. To do this, + * keep a shadow copy of the timestamp settings rather than attempting to + * deconstruct it from the registers. + **/ +int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ +	struct hwtstamp_config *config = &pf->tstamp_config; + +	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? +		-EFAULT : 0; +} + +/** + * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping + * @pf: Board private structure + * @ifreq: ioctl data + * + * Respond to the user filter requests and make the appropriate hardware + * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping + * logic, so keep track in software of whether to indicate these timestamps + * or not. + * + * It is permissible to "upgrade" the user request to a broader filter, as long + * as the user receives the timestamps they care about and the user is notified + * the filter has been broadened. + **/ +int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) +{ +	struct i40e_hw *hw = &pf->hw; +	struct hwtstamp_config *config = &pf->tstamp_config; +	u32 pf_id, tsyntype, regval; + +	if (copy_from_user(config, ifr->ifr_data, sizeof(*config))) +		return -EFAULT; + +	/* Reserved for future extensions. */ +	if (config->flags) +		return -EINVAL; + +	/* Confirm that 1588 is supported on this PF. 
*/ +	pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> +		I40E_PRTTSYN_CTL0_PF_ID_SHIFT; +	if (hw->pf_id != pf_id) +		return -EINVAL; + +	switch (config->tx_type) { +	case HWTSTAMP_TX_OFF: +		pf->ptp_tx = false; +		break; +	case HWTSTAMP_TX_ON: +		pf->ptp_tx = true; +		break; +	default: +		return -ERANGE; +	} + +	switch (config->rx_filter) { +	case HWTSTAMP_FILTER_NONE: +		pf->ptp_rx = false; +		tsyntype = 0; +		break; +	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: +	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: +	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: +		pf->ptp_rx = true; +		tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | +			   I40E_PRTTSYN_CTL1_TSYNTYPE_V1 | +			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK; +		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; +		break; +	case HWTSTAMP_FILTER_PTP_V2_EVENT: +	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: +	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: +	case HWTSTAMP_FILTER_PTP_V2_SYNC: +	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: +	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: +	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: +	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: +	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: +		pf->ptp_rx = true; +		tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | +			   I40E_PRTTSYN_CTL1_TSYNTYPE_V2 | +			   I40E_PRTTSYN_CTL1_UDP_ENA_MASK; +		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; +		break; +	case HWTSTAMP_FILTER_ALL: +	default: +		return -ERANGE; +	} + +	/* Clear out all 1588-related registers to clear and unlatch them. */ +	rd32(hw, I40E_PRTTSYN_STAT_0); +	rd32(hw, I40E_PRTTSYN_TXTIME_H); +	rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); +	rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); +	rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); +	rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); + +	/* Enable/disable the Tx timestamp interrupt based on user input. 
*/ +	regval = rd32(hw, I40E_PRTTSYN_CTL0); +	if (pf->ptp_tx) +		regval |= I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; +	else +		regval &= ~I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK; +	wr32(hw, I40E_PRTTSYN_CTL0, regval); + +	regval = rd32(hw, I40E_PFINT_ICR0_ENA); +	if (pf->ptp_tx) +		regval |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; +	else +		regval &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; +	wr32(hw, I40E_PFINT_ICR0_ENA, regval); + +	/* There is no simple on/off switch for Rx. To "disable" Rx support, +	 * ignore any received timestamps, rather than turn off the clock. +	 */ +	if (pf->ptp_rx) { +		regval = rd32(hw, I40E_PRTTSYN_CTL1); +		/* clear everything but the enable bit */ +		regval &= I40E_PRTTSYN_CTL1_TSYNENA_MASK; +		/* now enable bits for desired Rx timestamps */ +		regval |= tsyntype; +		wr32(hw, I40E_PRTTSYN_CTL1, regval); +	} + +	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? +		-EFAULT : 0; +} + +/** + * i40e_ptp_init - Initialize the 1588 support and register the PHC + * @pf: Board private structure + * + * This function registers the device clock as a PHC. If it is successful, it + * starts the clock in the hardware. + **/ +void i40e_ptp_init(struct i40e_pf *pf) +{ +	struct i40e_hw *hw = &pf->hw; +	struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + +	strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name)); +	pf->ptp_caps.owner = THIS_MODULE; +	pf->ptp_caps.max_adj = 999999999; +	pf->ptp_caps.n_ext_ts = 0; +	pf->ptp_caps.pps = 0; +	pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; +	pf->ptp_caps.adjtime = i40e_ptp_adjtime; +	pf->ptp_caps.gettime = i40e_ptp_gettime; +	pf->ptp_caps.settime = i40e_ptp_settime; +	pf->ptp_caps.enable = i40e_ptp_enable; + +	/* Attempt to register the clock before enabling the hardware. 
*/ +	pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); +	if (IS_ERR(pf->ptp_clock)) { +		pf->ptp_clock = NULL; +		dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", +			__func__); +	} else { +		struct timespec ts; +		u32 regval; + +		spin_lock_init(&pf->tmreg_lock); + +		dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, +			 netdev->name); +		pf->flags |= I40E_FLAG_PTP; + +		/* Ensure the clocks are running. */ +		regval = rd32(hw, I40E_PRTTSYN_CTL0); +		regval |= I40E_PRTTSYN_CTL0_TSYNENA_MASK; +		wr32(hw, I40E_PRTTSYN_CTL0, regval); +		regval = rd32(hw, I40E_PRTTSYN_CTL1); +		regval |= I40E_PRTTSYN_CTL1_TSYNENA_MASK; +		wr32(hw, I40E_PRTTSYN_CTL1, regval); + +		/* Set the increment value per clock tick. */ +		i40e_ptp_set_increment(pf); + +		/* reset the tstamp_config */ +		memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config)); + +		/* Set the clock value. */ +		ts = ktime_to_timespec(ktime_get_real()); +		i40e_ptp_settime(&pf->ptp_caps, &ts); +	} +} + +/** + * i40e_ptp_stop - Disable the driver/hardware support and unregister the PHC + * @pf: Board private structure + * + * This function handles the cleanup work required from the initialization by + * clearing out the important information and unregistering the PHC. 
+ **/ +void i40e_ptp_stop(struct i40e_pf *pf) +{ +	pf->flags &= ~I40E_FLAG_PTP; +	pf->ptp_tx = false; +	pf->ptp_rx = false; + +	if (pf->ptp_tx_skb) { +		dev_kfree_skb_any(pf->ptp_tx_skb); +		pf->ptp_tx_skb = NULL; +	} + +	if (pf->ptp_clock) { +		ptp_clock_unregister(pf->ptp_clock); +		pf->ptp_clock = NULL; +		dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, +			 pf->vsi[pf->lan_vsi]->netdev->name); +	} +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 6bd333cde28..947de98500f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -28,6 +27,10 @@  #ifndef _I40E_REGISTER_H_  #define _I40E_REGISTER_H_ +#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ +#define I40E_GL_GP_FUSE_MAX_INDEX 28 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 +#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)  #define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4  #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0  #define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) @@ -38,6 +41,11 @@  #define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)  #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16  #define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 +#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)  #define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8  #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0  #define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) @@ -50,9 +58,14 @@  #define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600  #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0  #define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) +#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ +#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 +#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)  #define I40E_PFPCI_VM_FLUSH_DONE 
0x0009C880  #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0  #define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) +  #define I40E_PF_ARQBAH 0x00080180  #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0  #define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT) @@ -837,7 +850,7 @@  #define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)  #define I40E_GLHMC_PEQ1FLMAX 0x000C2058  #define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)  #define I40E_GLHMC_PEQ1MAX 0x000C2054  #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0  #define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) @@ -903,7 +916,7 @@  #define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)  #define I40E_GLHMC_PEXFFLMAX 0x000C204c  #define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)  #define I40E_GLHMC_PEXFMAX 0x000C2048  #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0  #define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) @@ -1327,8 +1340,6 @@  #define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)  #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23  #define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)  #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25  #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << 
I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)  #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26 @@ -1354,8 +1365,6 @@  #define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)  #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23  #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT) -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24 -#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)  #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25  #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)  #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26 @@ -1576,6 +1585,14 @@  #define I40E_GLLAN_TSOMSK_M 0x000442DC  #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0  #define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */ +#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 +#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 +#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +  #define I40E_PFLAN_QALLOC 0x001C0400  #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0  #define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) @@ -1636,7 +1653,7 @@  #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11  #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)  #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) -#define I40E_VSILAN_QTABLE_MAX_INDEX 15 +#define I40E_VSILAN_QTABLE_MAX_INDEX 7  #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0  #define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)  #define 
I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 @@ -1773,16 +1790,20 @@  #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14  #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)  #define I40E_GL_MNG_FWSM 0x000B6134 -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 +#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1 +#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT) +#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6  #define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)  #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11  #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)  #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15  #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) +#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 +#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)  #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19  #define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) +#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25 +#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)  #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26  #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)  #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 @@ -2035,6 +2056,28 @@  #define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)  #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16  #define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT) +#define I40E_GLNVM_ULD 0x000B6008 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 +#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << 
I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 +#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 +#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 +#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 +#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 +#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 +#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 +#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 +#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 +#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) +  #define I40E_GLPCI_BYTCTH 0x0009C484  #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0  #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) @@ -2170,6 +2213,12 @@  #define I40E_GLPCI_PCIERR 0x000BE4FC  #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0  #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) +#define I40E_GLPCI_PCITEST2 0x000BE4BC +#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0 +#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT) +#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1 +#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << 
I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT) +  #define I40E_GLPCI_PKTCT 0x0009C4BC  #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0  #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) @@ -2380,8 +2429,7 @@  #define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)  #define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16  #define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17 -#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT) +  #define I40E_PFPE_MRTEIDXMASK 0x00008600  #define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0  #define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) @@ -2460,8 +2508,6 @@  #define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)  #define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16  #define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17 -#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)  #define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */  #define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127  #define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 @@ -3141,30 +3187,6 @@  #define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31  #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0  #define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLPM_DMACR 0x000881F4 -#define I40E_GLPM_DMACR_DMACWT_SHIFT 0 -#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT) -#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29 -#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT) -#define 
I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30 -#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT) -#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31 -#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT) -#define I40E_GLPM_LTRC 0x000BE500 -#define I40E_GLPM_LTRC_SLTRV_SHIFT 0 -#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT) -#define I40E_GLPM_LTRC_SSCALE_SHIFT 10 -#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT) -#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15 -#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT) -#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16 -#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT) -#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26 -#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT) -#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30 -#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT) -#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31 -#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)  #define I40E_PRTPM_EEE_STAT 0x001E4320  #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29  #define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) @@ -3201,9 +3223,6 @@  #define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)  #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31  #define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) -#define I40E_PRTPM_HPTC 0x000AC800 -#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0 -#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)  #define I40E_PRTPM_RLPIC 0x001E43A0  #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0  #define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT) @@ -3265,8 +3284,8 @@  #define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << 
I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)  #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3  #define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) -#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7 -#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT) +#define I40E_GLQF_CTL_RSVD_SHIFT 7 +#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)  #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8  #define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)  #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 @@ -3416,9 +3435,9 @@  #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */  #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8  #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 -#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) -#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6 -#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) +#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) +#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 +#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)  #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10  #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)  #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) @@ -3504,7 +3523,7 @@  #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5  #define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)  #define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) -#define I40E_VSIQF_TCREGION_MAX_INDEX 7 +#define I40E_VSIQF_TCREGION_MAX_INDEX 3  #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0  #define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)  #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 @@ -3521,10 +3540,7 @@  #define I40E_GL_FCOEDDPC_MAX_INDEX 143  #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0  #define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << 
I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) -#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */ -#define I40E_GL_FCOEDDPEC_MAX_INDEX 143 -#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0 -#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT) +/* _i=0...143 */  #define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */  #define I40E_GL_FCOEDIFEC_MAX_INDEX 143  #define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 @@ -4276,46 +4292,10 @@  #define I40E_PFPM_APM 0x000B8080  #define I40E_PFPM_APM_APME_SHIFT 0  #define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128)) -#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7 -#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0 -#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)  #define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */  #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7  #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0  #define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128)) -#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7 -#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0 -#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT) -#define I40E_PFPM_PROXYFC 0x00245A80 -#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0 -#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT) -#define I40E_PFPM_PROXYFC_EX_SHIFT 1 -#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT) -#define I40E_PFPM_PROXYFC_ARP_SHIFT 4 -#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT) -#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5 -#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYFC_NS_SHIFT 9 -#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << 
I40E_PFPM_PROXYFC_NS_SHIFT) -#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10 -#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYFC_MLD_SHIFT 12 -#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT) -#define I40E_PFPM_PROXYS 0x00245B80 -#define I40E_PFPM_PROXYS_EX_SHIFT 1 -#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT) -#define I40E_PFPM_PROXYS_ARP_SHIFT 4 -#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT) -#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5 -#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYS_NS_SHIFT 9 -#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT) -#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10 -#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT) -#define I40E_PFPM_PROXYS_MLD_SHIFT 12 -#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)  #define I40E_PFPM_WUC 0x0006B200  #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5  #define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT) @@ -4536,21 +4516,21 @@  #define I40E_VFMSIX_PBA 0x00002000  #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0  #define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */  #define I40E_VFMSIX_TADD_MAX_INDEX 16  #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0  #define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)  #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2  #define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */  #define I40E_VFMSIX_TMSG_MAX_INDEX 16  #define 
I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0  #define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */  #define I40E_VFMSIX_TUADD_MAX_INDEX 16  #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0  #define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ +#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */  #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16  #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0  #define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT) @@ -4610,8 +4590,6 @@  #define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)  #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16  #define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17 -#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)  #define I40E_VFPE_MRTEIDXMASK1 0x00009000  #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0  #define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) @@ -4684,5 +4662,13 @@  #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)  #define I40E_VFQF_HREGION_REGION_7_SHIFT 29  #define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT) - +#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF 
<< I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT) +#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24 +#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)  #endif diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index 5e5bcddac57..5f9cac55aa5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 49d2cfa9b0c..e49f31dbd5d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -25,7 +24,9 @@   *   ******************************************************************************/ +#include <linux/prefetch.h>  #include "i40e.h" +#include "i40e_prototype.h"  static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,  				u32 td_tag) @@ -37,19 +38,22 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,  			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));  } +#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)  /**   * i40e_program_fdir_filter - Program a Flow Director filter - * @fdir_input: Packet data that will be filter parameters + * @fdir_data: Packet data that will be filter parameters + * @raw_packet: the pre-allocated packet buffer for FDir   * @pf: The pf pointer   * @add: True for add/update, False for remove   **/ -int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, +int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,  			     struct i40e_pf *pf, bool add)  {  	struct i40e_filter_program_desc *fdir_desc;  	struct i40e_tx_buffer *tx_buf;  	struct i40e_tx_desc *tx_desc;  	struct i40e_ring *tx_ring; +	unsigned int fpt, dcc;  	struct i40e_vsi *vsi;  	struct device *dev;  	dma_addr_t dma; @@ -58,98 +62,90 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,  	/* find existing FDIR VSI */  	vsi = NULL; -	for (i = 0; i < pf->hw.func_caps.num_vsis; i++) +	for (i = 0; i < pf->num_alloc_vsi; i++)  		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)  			vsi = pf->vsi[i];  	if (!vsi)  		return -ENOENT; -	tx_ring = &vsi->tx_rings[0]; +	tx_ring = vsi->tx_rings[0];  	dev = tx_ring->dev; -	dma = dma_map_single(dev, fdir_data->raw_packet, -				I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); +	dma = dma_map_single(dev, raw_packet, +			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);  	if (dma_mapping_error(dev, dma))  		goto dma_fail;  	/* grab the next descriptor */ -	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); 
-	tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; -	tx_ring->next_to_use++; -	if (tx_ring->next_to_use == tx_ring->count) -		tx_ring->next_to_use = 0; +	i = tx_ring->next_to_use; +	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + +	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0; -	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index -					     << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) -					     & I40E_TXD_FLTR_QW0_QINDEX_MASK); +	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & +	      I40E_TXD_FLTR_QW0_QINDEX_MASK; -	fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off -					    << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) -					    & I40E_TXD_FLTR_QW0_FLEXOFF_MASK); +	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) & +	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK; -	fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype -					     << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) -					     & I40E_TXD_FLTR_QW0_PCTYPE_MASK); +	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) & +	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;  	/* Use LAN VSI Id if not programmed by user */  	if (fdir_data->dest_vsi == 0) -		fdir_desc->qindex_flex_ptype_vsi |= -					  cpu_to_le32((pf->vsi[pf->lan_vsi]->id) -					   << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT); +		fpt |= (pf->vsi[pf->lan_vsi]->id) << +		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;  	else -		fdir_desc->qindex_flex_ptype_vsi |= -					    cpu_to_le32((fdir_data->dest_vsi -					    << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) -					    & I40E_TXD_FLTR_QW0_DEST_VSI_MASK); +		fpt |= ((u32)fdir_data->dest_vsi << +			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) & +		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK; + +	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt); -	fdir_desc->dtype_cmd_cntindex = -				    cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG); +	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;  	if (add) -		fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( -				       I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE -					<< 
I40E_TXD_FLTR_QW1_PCMD_SHIFT); +		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << +		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;  	else -		fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( -					   I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE -					   << I40E_TXD_FLTR_QW1_PCMD_SHIFT); +		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << +		       I40E_TXD_FLTR_QW1_PCMD_SHIFT; -	fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl -					  << I40E_TXD_FLTR_QW1_DEST_SHIFT) -					  & I40E_TXD_FLTR_QW1_DEST_MASK); +	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) & +	       I40E_TXD_FLTR_QW1_DEST_MASK; -	fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( -		     (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) -		      & I40E_TXD_FLTR_QW1_FD_STATUS_MASK); +	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) & +	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;  	if (fdir_data->cnt_index != 0) { -		fdir_desc->dtype_cmd_cntindex |= -				    cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); -		fdir_desc->dtype_cmd_cntindex |= -					    cpu_to_le32((fdir_data->cnt_index -					    << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) -					    & I40E_TXD_FLTR_QW1_CNTINDEX_MASK); +		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; +		dcc |= ((u32)fdir_data->cnt_index << +			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & +			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;  	} +	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);  	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);  	/* Now program a dummy descriptor */ -	tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use); -	tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use]; -	tx_ring->next_to_use++; -	if (tx_ring->next_to_use == tx_ring->count) -		tx_ring->next_to_use = 0; +	i = tx_ring->next_to_use; +	tx_desc = I40E_TX_DESC(tx_ring, i); +	tx_buf = &tx_ring->tx_bi[i]; + +	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? 
i + 1 : 0; + +	/* record length, and DMA address */ +	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE); +	dma_unmap_addr_set(tx_buf, dma, dma);  	tx_desc->buffer_addr = cpu_to_le64(dma); -	td_cmd = I40E_TX_DESC_CMD_EOP | -		 I40E_TX_DESC_CMD_RS  | -		 I40E_TX_DESC_CMD_DUMMY; +	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;  	tx_desc->cmd_type_offset_bsz = -		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); +		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); -	/* Mark the data descriptor to be watched */ -	tx_buf->next_to_watch = tx_desc; +	/* set the timestamp */ +	tx_buf->time_stamp = jiffies;  	/* Force memory writes to complete before letting h/w  	 * know there are new descriptors to fetch.  (Only @@ -158,6 +154,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,  	 */  	wmb(); +	/* Mark the data descriptor to be watched */ +	tx_buf->next_to_watch = tx_desc; +  	writel(tx_ring->next_to_use, tx_ring->tail);  	return 0; @@ -165,50 +164,349 @@ dma_fail:  	return -1;  } +#define IP_HEADER_OFFSET 14 +#define I40E_UDPIP_DUMMY_PACKET_LEN 42 +/** + * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, +				   struct i40e_fdir_filter *fd_data, +				   u8 *raw_packet, bool add) +{ +	struct i40e_pf *pf = vsi->back; +	struct udphdr *udp; +	struct iphdr *ip; +	bool err = false; +	int ret; +	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, +		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0, +		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + +	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN); + +	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); +	udp = 
(struct udphdr *)(raw_packet + IP_HEADER_OFFSET +	      + sizeof(struct iphdr)); + +	ip->daddr = fd_data->dst_ip[0]; +	udp->dest = fd_data->dst_port; +	ip->saddr = fd_data->src_ip[0]; +	udp->source = fd_data->src_port; + +	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; +	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); +	if (ret) { +		dev_info(&pf->pdev->dev, +			 "Filter command send failed for PCTYPE %d (ret = %d)\n", +			 fd_data->pctype, ret); +		err = true; +	} else { +		dev_info(&pf->pdev->dev, +			 "Filter OK for PCTYPE %d (ret = %d)\n", +			 fd_data->pctype, ret); +	} + +	return err ? -EOPNOTSUPP : 0; +} + +#define I40E_TCPIP_DUMMY_PACKET_LEN 54 +/** + * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, +				   struct i40e_fdir_filter *fd_data, +				   u8 *raw_packet, bool add) +{ +	struct i40e_pf *pf = vsi->back; +	struct tcphdr *tcp; +	struct iphdr *ip; +	bool err = false; +	int ret; +	/* Dummy packet */ +	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, +		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0, +		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11, +		0x0, 0x72, 0, 0, 0, 0}; + +	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN); + +	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); +	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET +	      + sizeof(struct iphdr)); + +	ip->daddr = fd_data->dst_ip[0]; +	tcp->dest = fd_data->dst_port; +	ip->saddr = fd_data->src_ip[0]; +	tcp->source = fd_data->src_port; + +	if (add) { +		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { +			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for 
TCP/IPv4 flow being applied\n"); +			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; +		} +	} + +	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; +	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + +	if (ret) { +		dev_info(&pf->pdev->dev, +			 "Filter command send failed for PCTYPE %d (ret = %d)\n", +			 fd_data->pctype, ret); +		err = true; +	} else { +		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", +			 fd_data->pctype, ret); +	} + +	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + +	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); +	if (ret) { +		dev_info(&pf->pdev->dev, +			 "Filter command send failed for PCTYPE %d (ret = %d)\n", +			 fd_data->pctype, ret); +		err = true; +	} else { +		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", +			  fd_data->pctype, ret); +	} + +	return err ? -EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Always returns -EOPNOTSUPP + **/ +static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, +				    struct i40e_fdir_filter *fd_data, +				    u8 *raw_packet, bool add) +{ +	return -EOPNOTSUPP; +} + +#define I40E_IP_DUMMY_PACKET_LEN 34 +/** + * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for + * a specific flow spec + * @vsi: pointer to the targeted VSI + * @fd_data: the flow director data required for the FDir descriptor + * @raw_packet: the pre-allocated packet buffer for FDir + * @add: true adds a filter, false removes it + * + * Returns 0 if the filters were successfully added or removed + **/ +static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, +				  struct i40e_fdir_filter *fd_data, +				  u8 *raw_packet, bool add) +{ +	struct i40e_pf *pf = 
vsi->back; +	struct iphdr *ip; +	bool err = false; +	int ret; +	int i; +	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0, +		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0, +		0, 0, 0, 0}; + +	memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN); +	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET); + +	ip->saddr = fd_data->src_ip[0]; +	ip->daddr = fd_data->dst_ip[0]; +	ip->protocol = 0; + +	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; +	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) { +		fd_data->pctype = i; +		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); + +		if (ret) { +			dev_info(&pf->pdev->dev, +				 "Filter command send failed for PCTYPE %d (ret = %d)\n", +				 fd_data->pctype, ret); +			err = true; +		} else { +			dev_info(&pf->pdev->dev, +				 "Filter OK for PCTYPE %d (ret = %d)\n", +				 fd_data->pctype, ret); +		} +	} + +	return err ? -EOPNOTSUPP : 0; +} + +/** + * i40e_add_del_fdir - Build raw packets to add/del fdir filter + * @vsi: pointer to the targeted VSI + * @cmd: command to get or set RX flow classification rules + * @add: true adds a filter, false removes it + * + **/ +int i40e_add_del_fdir(struct i40e_vsi *vsi, +		      struct i40e_fdir_filter *input, bool add) +{ +	struct i40e_pf *pf = vsi->back; +	u8 *raw_packet; +	int ret; + +	/* Populate the Flow Director that we have at the moment +	 * and allocate the raw packet buffer for the calling functions +	 */ +	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL); +	if (!raw_packet) +		return -ENOMEM; + +	switch (input->flow_type & ~FLOW_EXT) { +	case TCP_V4_FLOW: +		ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet, +					      add); +		break; +	case UDP_V4_FLOW: +		ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet, +					      add); +		break; +	case SCTP_V4_FLOW: +		ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet, +					       add); +		break; +	case IPV4_FLOW: +		ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet, +					
     add); +		break; +	case IP_USER_FLOW: +		switch (input->ip4_proto) { +		case IPPROTO_TCP: +			ret = i40e_add_del_fdir_tcpv4(vsi, input, +						      raw_packet, add); +			break; +		case IPPROTO_UDP: +			ret = i40e_add_del_fdir_udpv4(vsi, input, +						      raw_packet, add); +			break; +		case IPPROTO_SCTP: +			ret = i40e_add_del_fdir_sctpv4(vsi, input, +						       raw_packet, add); +			break; +		default: +			ret = i40e_add_del_fdir_ipv4(vsi, input, +						     raw_packet, add); +			break; +		} +		break; +	default: +		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", +			 input->flow_type); +		ret = -EINVAL; +	} + +	kfree(raw_packet); +	return ret; +} +  /**   * i40e_fd_handle_status - check the Programming Status for FD   * @rx_ring: the Rx ring for this descriptor - * @qw: the descriptor data + * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.   * @prog_id: the id originally used for programming   *   * This is used to verify if the FD programming or invalidation   * requested by SW to the HW is successful or not and take actions accordingly.   
**/ -static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) +static void i40e_fd_handle_status(struct i40e_ring *rx_ring, +				  union i40e_rx_desc *rx_desc, u8 prog_id)  { -	struct pci_dev *pdev = rx_ring->vsi->back->pdev; +	struct i40e_pf *pf = rx_ring->vsi->back; +	struct pci_dev *pdev = pf->pdev; +	u32 fcnt_prog, fcnt_avail;  	u32 error; +	u64 qw; +	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);  	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>  		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; -	/* for now just print the Status */ -	dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n", -		 prog_id, error); +	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { +		dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", +			 rx_desc->wb.qword0.hi_dword.fd_id); + +		/* filter programming failed most likely due to table full */ +		fcnt_prog = i40e_get_current_fd_count(pf); +		fcnt_avail = i40e_get_fd_cnt_all(pf); +		/* If ATR is running fcnt_prog can quickly change, +		 * if we are very close to full, it makes sense to disable +		 * FD ATR/SB and then re-enable it when there is room. 
+		 */ +		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { +			/* Turn off ATR first */ +			if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { +				pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; +				dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); +				pf->auto_disable_flags |= +						       I40E_FLAG_FD_ATR_ENABLED; +				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; +			} else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { +				pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; +				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); +				pf->auto_disable_flags |= +							I40E_FLAG_FD_SB_ENABLED; +				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; +			} +		} else { +			dev_info(&pdev->dev, "FD filter programming error\n"); +		} +	} else if (error == +			  (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { +		if (I40E_DEBUG_FD & pf->hw.debug_mask) +			dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n", +				 rx_desc->wb.qword0.hi_dword.fd_id); +	}  }  /** - * i40e_unmap_tx_resource - Release a Tx buffer + * i40e_unmap_and_free_tx_resource - Release a Tx buffer   * @ring:      the ring that owns the buffer   * @tx_buffer: the buffer to free   **/ -static inline void i40e_unmap_tx_resource(struct i40e_ring *ring, -					  struct i40e_tx_buffer *tx_buffer) +static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, +					    struct i40e_tx_buffer *tx_buffer)  { -	if (tx_buffer->dma) { -		if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE) -			dma_unmap_page(ring->dev, -				       tx_buffer->dma, -				       tx_buffer->length, -				       DMA_TO_DEVICE); -		else +	if (tx_buffer->skb) { +		dev_kfree_skb_any(tx_buffer->skb); +		if (dma_unmap_len(tx_buffer, len))  			dma_unmap_single(ring->dev, -					 tx_buffer->dma, -					 tx_buffer->length, +					 dma_unmap_addr(tx_buffer, dma), +					 dma_unmap_len(tx_buffer, len),  					 DMA_TO_DEVICE); +	} else if (dma_unmap_len(tx_buffer, 
len)) { +		dma_unmap_page(ring->dev, +			       dma_unmap_addr(tx_buffer, dma), +			       dma_unmap_len(tx_buffer, len), +			       DMA_TO_DEVICE);  	} -	tx_buffer->dma = 0; -	tx_buffer->time_stamp = 0; +	tx_buffer->next_to_watch = NULL; +	tx_buffer->skb = NULL; +	dma_unmap_len_set(tx_buffer, len, 0); +	/* tx_buffer must be completely set up in the transmit path */  }  /** @@ -217,7 +515,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,   **/  void i40e_clean_tx_ring(struct i40e_ring *tx_ring)  { -	struct i40e_tx_buffer *tx_buffer;  	unsigned long bi_size;  	u16 i; @@ -226,13 +523,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)  		return;  	/* Free all the Tx ring sk_buffs */ -	for (i = 0; i < tx_ring->count; i++) { -		tx_buffer = &tx_ring->tx_bi[i]; -		i40e_unmap_tx_resource(tx_ring, tx_buffer); -		if (tx_buffer->skb) -			dev_kfree_skb_any(tx_buffer->skb); -		tx_buffer->skb = NULL; -	} +	for (i = 0; i < tx_ring->count; i++) +		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);  	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;  	memset(tx_ring->tx_bi, 0, bi_size); @@ -242,6 +534,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)  	tx_ring->next_to_use = 0;  	tx_ring->next_to_clean = 0; + +	if (!tx_ring->netdev) +		return; + +	/* cleanup Tx queue statistics */ +	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, +						  tx_ring->queue_index));  }  /** @@ -300,14 +599,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)  	 * run the check_tx_hang logic with a transmit completion  	 * pending but without time to complete it yet.  	 
*/ -	if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) && +	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&  	    tx_pending) {  		/* make sure it is true for two checks in a row */  		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,  				       &tx_ring->state);  	} else {  		/* update completed stats and disarm the hang check */ -		tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets; +		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;  		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);  	} @@ -315,6 +614,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)  }  /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring:  tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ +	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + +	return le32_to_cpu(*(volatile __le32 *)head); +} + +/**   * i40e_clean_tx_irq - Reclaim resources after transmit completes   * @tx_ring:  tx ring to clean   * @budget:   how many cleans we're allowed @@ -325,68 +638,96 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)  {  	u16 i = tx_ring->next_to_clean;  	struct i40e_tx_buffer *tx_buf; +	struct i40e_tx_desc *tx_head;  	struct i40e_tx_desc *tx_desc;  	unsigned int total_packets = 0;  	unsigned int total_bytes = 0;  	tx_buf = &tx_ring->tx_bi[i];  	tx_desc = I40E_TX_DESC(tx_ring, i); +	i -= tx_ring->count; -	for (; budget; budget--) { -		struct i40e_tx_desc *eop_desc; +	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); -		eop_desc = tx_buf->next_to_watch; +	do { +		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;  		/* if next_to_watch is not set then there is no work pending */  		if (!eop_desc)  			break; -		/* if the descriptor isn't done, no work yet to do */ -		if (!(eop_desc->cmd_type_offset_bsz & -		      
cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE))) +		/* prevent any other reads prior to eop_desc */ +		read_barrier_depends(); + +		/* we have caught up to head, no work left to do */ +		if (tx_head == tx_desc)  			break; -		/* count the packet as being completed */ -		tx_ring->tx_stats.completed++; +		/* clear next_to_watch to prevent false hangs */  		tx_buf->next_to_watch = NULL; -		tx_buf->time_stamp = 0; -		/* set memory barrier before eop_desc is verified */ -		rmb(); +		/* update the statistics for this packet */ +		total_bytes += tx_buf->bytecount; +		total_packets += tx_buf->gso_segs; -		do { -			i40e_unmap_tx_resource(tx_ring, tx_buf); +		/* free the skb */ +		dev_kfree_skb_any(tx_buf->skb); -			/* clear dtype status */ -			tx_desc->cmd_type_offset_bsz &= -				~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK); +		/* unmap skb header data */ +		dma_unmap_single(tx_ring->dev, +				 dma_unmap_addr(tx_buf, dma), +				 dma_unmap_len(tx_buf, len), +				 DMA_TO_DEVICE); -			if (likely(tx_desc == eop_desc)) { -				eop_desc = NULL; +		/* clear tx_buffer data */ +		tx_buf->skb = NULL; +		dma_unmap_len_set(tx_buf, len, 0); -				dev_kfree_skb_any(tx_buf->skb); -				tx_buf->skb = NULL; - -				total_bytes += tx_buf->bytecount; -				total_packets += tx_buf->gso_segs; -			} +		/* unmap remaining buffers */ +		while (tx_desc != eop_desc) {  			tx_buf++;  			tx_desc++;  			i++; -			if (unlikely(i == tx_ring->count)) { -				i = 0; +			if (unlikely(!i)) { +				i -= tx_ring->count;  				tx_buf = tx_ring->tx_bi;  				tx_desc = I40E_TX_DESC(tx_ring, 0);  			} -		} while (eop_desc); -	} +			/* unmap any remaining paged data */ +			if (dma_unmap_len(tx_buf, len)) { +				dma_unmap_page(tx_ring->dev, +					       dma_unmap_addr(tx_buf, dma), +					       dma_unmap_len(tx_buf, len), +					       DMA_TO_DEVICE); +				dma_unmap_len_set(tx_buf, len, 0); +			} +		} + +		/* move us one more past the eop_desc for start of next pkt */ +		tx_buf++; +		tx_desc++; +		i++; +		if (unlikely(!i)) { +			i -= 
tx_ring->count; +			tx_buf = tx_ring->tx_bi; +			tx_desc = I40E_TX_DESC(tx_ring, 0); +		} + +		/* update budget accounting */ +		budget--; +	} while (likely(budget)); + +	i += tx_ring->count;  	tx_ring->next_to_clean = i; -	tx_ring->tx_stats.bytes += total_bytes; -	tx_ring->tx_stats.packets += total_packets; +	u64_stats_update_begin(&tx_ring->syncp); +	tx_ring->stats.bytes += total_bytes; +	tx_ring->stats.packets += total_packets; +	u64_stats_update_end(&tx_ring->syncp);  	tx_ring->q_vector->tx.total_bytes += total_bytes;  	tx_ring->q_vector->tx.total_packets += total_packets; +  	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {  		/* schedule immediate reset if we believe we hung */  		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" @@ -414,6 +755,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)  		return true;  	} +	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, +						      tx_ring->queue_index), +				  total_packets, total_bytes); +  #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)  	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&  		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { @@ -524,8 +869,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)  	i40e_set_new_dynamic_itr(&q_vector->tx);  	if (old_itr != q_vector->tx.itr)  		wr32(hw, reg_addr, q_vector->tx.itr); - -	i40e_flush(hw);  }  /** @@ -549,7 +892,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,  		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;  	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) -		i40e_fd_handle_status(rx_ring, qw, id); +		i40e_fd_handle_status(rx_ring, rx_desc, id);  }  /** @@ -573,6 +916,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)  	/* round up to nearest 4K */  	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); +	/* add u32 for head writeback, align after this takes care of +	 * guaranteeing this is at least one cache line 
in size +	 */ +	tx_ring->size += sizeof(u32);  	tx_ring->size = ALIGN(tx_ring->size, 4096);  	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,  					   &tx_ring->dma, GFP_KERNEL); @@ -746,7 +1093,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)  			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,  							rx_ring->rx_buf_len);  			if (!skb) { -				rx_ring->rx_stats.alloc_rx_buff_failed++; +				rx_ring->rx_stats.alloc_buff_failed++;  				goto no_buffers;  			}  			/* initialize queue mapping */ @@ -760,7 +1107,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)  						 rx_ring->rx_buf_len,  						 DMA_FROM_DEVICE);  			if (dma_mapping_error(rx_ring->dev, bi->dma)) { -				rx_ring->rx_stats.alloc_rx_buff_failed++; +				rx_ring->rx_stats.alloc_buff_failed++;  				bi->dma = 0;  				goto no_buffers;  			} @@ -770,7 +1117,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)  			if (!bi->page) {  				bi->page = alloc_page(GFP_ATOMIC);  				if (!bi->page) { -					rx_ring->rx_stats.alloc_rx_page_failed++; +					rx_ring->rx_stats.alloc_page_failed++;  					goto no_buffers;  				}  			} @@ -785,7 +1132,7 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)  							    DMA_FROM_DEVICE);  				if (dma_mapping_error(rx_ring->dev,  						      bi->page_dma)) { -					rx_ring->rx_stats.alloc_rx_page_failed++; +					rx_ring->rx_stats.alloc_page_failed++;  					bi->page_dma = 0;  					goto no_buffers;  				} @@ -838,27 +1185,107 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,   * @skb: skb currently being received and modified   * @rx_status: status value of last descriptor in packet   * @rx_error: error value of last descriptor in packet + * @rx_ptype: ptype value of last descriptor in packet   **/  static inline void i40e_rx_checksum(struct i40e_vsi *vsi,  				    struct sk_buff *skb,  				    u32 rx_status, -				    u32 rx_error) +				    u32 rx_error, +				    u16 
rx_ptype)  { +	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); +	bool ipv4 = false, ipv6 = false; +	bool ipv4_tunnel, ipv6_tunnel; +	__wsum rx_udp_csum; +	struct iphdr *iph; +	__sum16 csum; + +	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && +		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); +	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && +		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + +	skb->encapsulation = ipv4_tunnel || ipv6_tunnel;  	skb->ip_summed = CHECKSUM_NONE;  	/* Rx csum enabled and ip headers found? */ -	if (!(vsi->netdev->features & NETIF_F_RXCSUM && -	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) +	if (!(vsi->netdev->features & NETIF_F_RXCSUM)) +		return; + +	/* did the hardware decode the packet and checksum? */ +	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) +		return; + +	/* both known and outer_ip must be set for the below code to work */ +	if (!(decoded.known && decoded.outer_ip))  		return; -	/* IP or L4 checksum error */ -	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | -			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) { -		vsi->back->hw_csum_rx_error++; +	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && +	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) +		ipv4 = true; +	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && +		 decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) +		ipv6 = true; + +	if (ipv4 && +	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | +			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))) +		goto checksum_fail; + +	/* likely incorrect csum if alternate IP extension headers found */ +	if (ipv6 && +	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP && +	    rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) && +	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) +		/* don't increment checksum err here, non-fatal err */  		return; + +	/* there was some L4 error, count error and punt packet to the 
stack */ +	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) +		goto checksum_fail; + +	/* handle packets that were not able to be checksummed due +	 * to arrival speed, in this case the stack can compute +	 * the csum. +	 */ +	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT)) +		return; + +	/* If VXLAN traffic has an outer UDPv4 checksum we need to check +	 * it in the driver, hardware does not do it for us. +	 * Since L3L4P bit was set we assume a valid IHL value (>=5) +	 * so the total length of IPv4 header is IHL*4 bytes +	 * The UDP_0 bit *may* bet set if the *inner* header is UDP +	 */ +	if (ipv4_tunnel && +	    (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && +	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { +		skb->transport_header = skb->mac_header + +					sizeof(struct ethhdr) + +					(ip_hdr(skb)->ihl * 4); + +		/* Add 4 bytes for VLAN tagged packets */ +		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || +					  skb->protocol == htons(ETH_P_8021AD)) +					  ? 
VLAN_HLEN : 0; + +		rx_udp_csum = udp_csum(skb); +		iph = ip_hdr(skb); +		csum = csum_tcpudp_magic( +				iph->saddr, iph->daddr, +				(skb->len - skb_transport_offset(skb)), +				IPPROTO_UDP, rx_udp_csum); + +		if (udp_hdr(skb)->check != csum) +			goto checksum_fail;  	}  	skb->ip_summed = CHECKSUM_UNNECESSARY; + +	return; + +checksum_fail: +	vsi->back->hw_csum_rx_error++;  }  /** @@ -869,13 +1296,38 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,  static inline u32 i40e_rx_hash(struct i40e_ring *ring,  			       union i40e_rx_desc *rx_desc)  { -	if (ring->netdev->features & NETIF_F_RXHASH) { -		if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >> -		     I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) & -		    I40E_RX_DESC_FLTSTAT_RSS_HASH) -			return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); -	} -	return 0; +	const __le64 rss_mask = +		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << +			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); + +	if ((ring->netdev->features & NETIF_F_RXHASH) && +	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) +		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); +	else +		return 0; +} + +/** + * i40e_ptype_to_hash - get a hash type + * @ptype: the ptype value from the descriptor + * + * Returns a hash type to be used by skb_set_hash + **/ +static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) +{ +	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); + +	if (!decoded.known) +		return PKT_HASH_TYPE_NONE; + +	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && +	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) +		return PKT_HASH_TYPE_L4; +	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && +		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) +		return PKT_HASH_TYPE_L3; +	else +		return PKT_HASH_TYPE_L2;  }  /** @@ -895,12 +1347,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)  	u16 i = rx_ring->next_to_clean;  	union i40e_rx_desc *rx_desc;  	u32 
rx_error, rx_status; +	u8 rx_ptype;  	u64 qword; +	if (budget <= 0) +		return 0; +  	rx_desc = I40E_RX_DESC(rx_ring, i);  	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); -	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) -				>> I40E_RXD_QW1_STATUS_SHIFT; +	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> +		    I40E_RXD_QW1_STATUS_SHIFT;  	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {  		union i40e_rx_desc *next_rxd; @@ -916,18 +1372,20 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)  		skb = rx_bi->skb;  		prefetch(skb->data); -		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) -					      >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; -		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) -					      >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT; -		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) -					      >> I40E_RXD_QW1_LENGTH_SPH_SHIFT; +		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> +				I40E_RXD_QW1_LENGTH_PBUF_SHIFT; +		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> +				I40E_RXD_QW1_LENGTH_HBUF_SHIFT; +		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> +			 I40E_RXD_QW1_LENGTH_SPH_SHIFT; -		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) -					      >> I40E_RXD_QW1_ERROR_SHIFT; +		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> +			   I40E_RXD_QW1_ERROR_SHIFT;  		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);  		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); +		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> +			   I40E_RXD_QW1_PTYPE_SHIFT;  		rx_bi->skb = NULL;  		/* This memory barrier is needed to keep us from reading @@ -1004,17 +1462,29 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)  		/* ERR_MASK will only have valid bits if EOP set */  		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {  			dev_kfree_skb_any(skb); +			/* TODO: shouldn't we increment a counter indicating the +			 * drop? 
+			 */  			goto next_desc;  		} -		skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); -		i40e_rx_checksum(vsi, skb, rx_status, rx_error); +		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), +			     i40e_ptype_to_hash(rx_ptype)); +		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { +			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & +					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> +					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); +			rx_ring->last_rx_timestamp = jiffies; +		}  		/* probably a little skewed due to removing CRC */  		total_rx_bytes += skb->len;  		total_rx_packets++;  		skb->protocol = eth_type_trans(skb, rx_ring->netdev); + +		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); +  		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)  			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)  			 : 0; @@ -1037,13 +1507,15 @@ next_desc:  		/* use prefetched values */  		rx_desc = next_rxd;  		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); -		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) -						>> I40E_RXD_QW1_STATUS_SHIFT; +		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> +			    I40E_RXD_QW1_STATUS_SHIFT;  	}  	rx_ring->next_to_clean = i; -	rx_ring->rx_stats.packets += total_rx_packets; -	rx_ring->rx_stats.bytes += total_rx_bytes; +	u64_stats_update_begin(&rx_ring->syncp); +	rx_ring->stats.packets += total_rx_packets; +	rx_ring->stats.bytes += total_rx_bytes; +	u64_stats_update_end(&rx_ring->syncp);  	rx_ring->q_vector->rx.total_packets += total_rx_packets;  	rx_ring->q_vector->rx.total_bytes += total_rx_bytes; @@ -1067,27 +1539,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)  	struct i40e_q_vector *q_vector =  			       container_of(napi, struct i40e_q_vector, napi);  	struct i40e_vsi *vsi = q_vector->vsi; +	struct i40e_ring *ring;  	bool clean_complete = true;  	int budget_per_ring; -	int i;  	if (test_bit(__I40E_DOWN, &vsi->state)) {  		napi_complete(napi);  		return 0;  	} +	/* Since the actual 
Tx work is minimal, we can give the Tx a larger +	 * budget and be more aggressive about cleaning up the Tx descriptors. +	 */ +	i40e_for_each_ring(ring, q_vector->tx) +		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); +  	/* We attempt to distribute budget to each Rx queue fairly, but don't  	 * allow the budget to go below 1 because that would exit polling early. -	 * Since the actual Tx work is minimal, we can give the Tx a larger -	 * budget and be more aggressive about cleaning up the Tx descriptors.  	 */  	budget_per_ring = max(budget/q_vector->num_ringpairs, 1); -	for (i = 0; i < q_vector->num_ringpairs; i++) { -		clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i], -						    vsi->work_limit); -		clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i], -						    budget_per_ring); -	} + +	i40e_for_each_ring(ring, q_vector->rx) +		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);  	/* If work not completed, return budget and polling will return */  	if (!clean_complete) @@ -1117,7 +1590,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)  			qval = rd32(hw, I40E_QINT_TQCTL(0));  			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;  			wr32(hw, I40E_QINT_TQCTL(0), qval); -			i40e_flush(hw); + +			i40e_irq_dynamic_enable_icr0(vsi->back);  		}  	} @@ -1144,17 +1618,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,  	struct tcphdr *th;  	unsigned int hlen;  	u32 flex_ptype, dtype_cmd; +	u16 i;  	/* make sure ATR is enabled */ -	if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED)) +	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))  		return;  	/* if sampling is disabled do nothing */  	if (!tx_ring->atr_sample_rate)  		return; -	tx_ring->atr_count++; -  	/* snag network header to get L4 type and address */  	hdr.network = skb_network_header(skb); @@ -1176,17 +1649,27 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,  	th = (struct tcphdr *)(hdr.network + hlen); -	/* sample on all syn/fin packets or 
once every atr sample rate */ -	if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate)) +	/* Due to lack of space, no more new filters can be programmed */ +	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) +		return; + +	tx_ring->atr_count++; + +	/* sample on all syn/fin/rst packets or once every atr sample rate */ +	if (!th->fin && +	    !th->syn && +	    !th->rst && +	    (tx_ring->atr_count < tx_ring->atr_sample_rate))  		return;  	tx_ring->atr_count = 0;  	/* grab the next descriptor */ -	fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use); -	tx_ring->next_to_use++; -	if (tx_ring->next_to_use == tx_ring->count) -		tx_ring->next_to_use = 0; +	i = tx_ring->next_to_use; +	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); + +	i++; +	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;  	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &  		      I40E_TXD_FLTR_QW0_QINDEX_MASK; @@ -1200,7 +1683,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,  	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; -	dtype_cmd |= th->fin ? +	dtype_cmd |= (th->fin || th->rst) ?  		     
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<  		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :  		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << @@ -1212,11 +1695,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,  	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<  		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; +	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; +	dtype_cmd |= +		((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & +		I40E_TXD_FLTR_QW1_CNTINDEX_MASK; +  	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);  	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);  } -#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)  /**   * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW   * @skb:     send buffer @@ -1241,7 +1728,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,  		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;  		tx_flags |= I40E_TX_FLAGS_HW_VLAN;  	/* else if it is a SW VLAN, check the next protocol and store the tag */ -	} else if (protocol == __constant_htons(ETH_P_8021Q)) { +	} else if (protocol == htons(ETH_P_8021Q)) {  		struct vlan_hdr *vhdr, _vhdr;  		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);  		if (!vhdr) @@ -1261,9 +1748,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,  				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;  		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {  			struct vlan_ethhdr *vhdr; -			if (skb_header_cloned(skb) && -			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) -				return -ENOMEM; +			int rc; + +			rc = skb_cow_head(skb, 0); +			if (rc < 0) +				return rc;  			vhdr = (struct vlan_ethhdr *)skb->data;  			vhdr->h_vlan_TCI = htons(tx_flags >>  						 I40E_TX_FLAGS_VLAN_SHIFT); @@ -1276,27 +1765,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,  }  /** - * i40e_tx_csum - is checksum offload requested - * @tx_ring:  ptr to the ring to send - * @skb:      ptr to the skb we're sending - * 
@tx_flags: the collected send information - * @protocol: the send protocol - * - * Returns true if checksum offload is requested - **/ -static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb, -			 u32 tx_flags, __be16 protocol) -{ -	if ((skb->ip_summed != CHECKSUM_PARTIAL) && -	    !(tx_flags & I40E_TX_FLAGS_TXSW)) { -		if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN)) -			return false; -	} - -	return skb->ip_summed == CHECKSUM_PARTIAL; -} - -/**   * i40e_tso - set up the tso context descriptor   * @tx_ring:  ptr to the ring to send   * @skb:      ptr to the skb we're sending @@ -1312,22 +1780,20 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,  		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)  {  	u32 cd_cmd, cd_tso_len, cd_mss; +	struct ipv6hdr *ipv6h;  	struct tcphdr *tcph;  	struct iphdr *iph;  	u32 l4len;  	int err; -	struct ipv6hdr *ipv6h;  	if (!skb_is_gso(skb))  		return 0; -	if (skb_header_cloned(skb)) { -		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); -		if (err) -			return err; -	} +	err = skb_cow_head(skb, 0); +	if (err < 0) +		return err; -	if (protocol == __constant_htons(ETH_P_IP)) { +	if (protocol == htons(ETH_P_IP)) {  		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);  		tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb);  		iph->tot_len = 0; @@ -1353,10 +1819,47 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,  	cd_cmd = I40E_TX_CTX_DESC_TSO;  	cd_tso_len = skb->len - *hdr_len;  	cd_mss = skb_shinfo(skb)->gso_size; -	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) -			     | ((u64)cd_tso_len -				<< I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) -			     | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); +	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | +				((u64)cd_tso_len << +				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | +				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); +	return 1; +} + +/** + * i40e_tsyn - set up the tsyn context descriptor + * @tx_ring:  ptr to the ring to send + * @skb:      ptr to the skb we're sending + * @tx_flags: the collected send information + * + * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen + **/ +static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, +		     u32 tx_flags, u64 *cd_type_cmd_tso_mss) +{ +	struct i40e_pf *pf; + +	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) +		return 0; + +	/* Tx timestamps cannot be sampled when doing TSO */ +	if (tx_flags & I40E_TX_FLAGS_TSO) +		return 0; + +	/* only timestamp the outbound packet if the user has requested it and +	 * we are not already transmitting a packet to be timestamped +	 */ +	pf = i40e_netdev_to_pf(tx_ring->netdev); +	if (pf->ptp_tx && !pf->ptp_tx_skb) { +		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +		pf->ptp_tx_skb = skb_get(skb); +	} else { +		return 0; +	} + +	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN << +				I40E_TXD_CTX_QW1_CMD_SHIFT; +  	return 1;  } @@ -1482,15 +1985,17 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,  			       const u32 cd_tunneling, const u32 cd_l2tag2)  {  	struct i40e_tx_context_desc *context_desc; +	int i = tx_ring->next_to_use; -	if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) +	if 
((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) && +	    !cd_tunneling && !cd_l2tag2)  		return;  	/* grab the next descriptor */ -	context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use); -	tx_ring->next_to_use++; -	if (tx_ring->next_to_use == tx_ring->count) -		tx_ring->next_to_use = 0; +	context_desc = I40E_TX_CTXTDESC(tx_ring, i); + +	i++; +	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;  	/* cpu_to_le32 and assign to struct fields */  	context_desc->tunneling_params = cpu_to_le32(cd_tunneling); @@ -1512,68 +2017,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,  			struct i40e_tx_buffer *first, u32 tx_flags,  			const u8 hdr_len, u32 td_cmd, u32 td_offset)  { -	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];  	unsigned int data_len = skb->data_len;  	unsigned int size = skb_headlen(skb); -	struct device *dev = tx_ring->dev; -	u32 paylen = skb->len - hdr_len; -	u16 i = tx_ring->next_to_use; +	struct skb_frag_struct *frag;  	struct i40e_tx_buffer *tx_bi;  	struct i40e_tx_desc *tx_desc; -	u32 buf_offset = 0; +	u16 i = tx_ring->next_to_use;  	u32 td_tag = 0;  	dma_addr_t dma;  	u16 gso_segs; -	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); -	if (dma_mapping_error(dev, dma)) -		goto dma_error; -  	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {  		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;  		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>  			 I40E_TX_FLAGS_VLAN_SHIFT;  	} +	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) +		gso_segs = skb_shinfo(skb)->gso_segs; +	else +		gso_segs = 1; + +	/* multiply data chunks by size of headers */ +	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len); +	first->gso_segs = gso_segs; +	first->skb = skb; +	first->tx_flags = tx_flags; + +	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); +  	tx_desc = I40E_TX_DESC(tx_ring, i); -	for (;;) { -		while (size > I40E_MAX_DATA_PER_TXD) { -			tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); +	
tx_bi = first; + +	for (frag = &skb_shinfo(skb)->frags[0];; frag++) { +		if (dma_mapping_error(tx_ring->dev, dma)) +			goto dma_error; + +		/* record length, and DMA address */ +		dma_unmap_len_set(tx_bi, len, size); +		dma_unmap_addr_set(tx_bi, dma, dma); + +		tx_desc->buffer_addr = cpu_to_le64(dma); + +		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {  			tx_desc->cmd_type_offset_bsz =  				build_ctob(td_cmd, td_offset,  					   I40E_MAX_DATA_PER_TXD, td_tag); -			buf_offset += I40E_MAX_DATA_PER_TXD; -			size -= I40E_MAX_DATA_PER_TXD; -  			tx_desc++;  			i++;  			if (i == tx_ring->count) {  				tx_desc = I40E_TX_DESC(tx_ring, 0);  				i = 0;  			} -		} -		tx_bi = &tx_ring->tx_bi[i]; -		tx_bi->length = buf_offset + size; -		tx_bi->tx_flags = tx_flags; -		tx_bi->dma = dma; +			dma += I40E_MAX_DATA_PER_TXD; +			size -= I40E_MAX_DATA_PER_TXD; -		tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset); -		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, -							  size, td_tag); +			tx_desc->buffer_addr = cpu_to_le64(dma); +		}  		if (likely(!data_len))  			break; -		size = skb_frag_size(frag); -		data_len -= size; -		buf_offset = 0; -		tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE; - -		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); -		if (dma_mapping_error(dev, dma)) -			goto dma_error; +		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, +							  size, td_tag);  		tx_desc++;  		i++; @@ -1582,31 +2090,39 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,  			i = 0;  		} -		frag++; -	} - -	tx_desc->cmd_type_offset_bsz |= -		       cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); +		size = skb_frag_size(frag); +		data_len -= size; -	i++; -	if (i == tx_ring->count) -		i = 0; +		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, +				       DMA_TO_DEVICE); -	tx_ring->next_to_use = i; +		tx_bi = &tx_ring->tx_bi[i]; +	} -	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) -		gso_segs = 
skb_shinfo(skb)->gso_segs; -	else -		gso_segs = 1; +	/* Place RS bit on last descriptor of any packet that spans across the +	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. +	 */ +#define WB_STRIDE 0x3 +	if (((i & WB_STRIDE) != WB_STRIDE) && +	    (first <= &tx_ring->tx_bi[i]) && +	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { +		tx_desc->cmd_type_offset_bsz = +			build_ctob(td_cmd, td_offset, size, td_tag) | +			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP << +					 I40E_TXD_QW1_CMD_SHIFT); +	} else { +		tx_desc->cmd_type_offset_bsz = +			build_ctob(td_cmd, td_offset, size, td_tag) | +			cpu_to_le64((u64)I40E_TXD_CMD << +					 I40E_TXD_QW1_CMD_SHIFT); +	} -	/* multiply data chunks by size of headers */ -	tx_bi->bytecount = paylen + (gso_segs * hdr_len); -	tx_bi->gso_segs = gso_segs; -	tx_bi->skb = skb; +	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, +						 tx_ring->queue_index), +			     first->bytecount); -	/* set the timestamp and next to watch values */ +	/* set the timestamp */  	first->time_stamp = jiffies; -	first->next_to_watch = tx_desc;  	/* Force memory writes to complete before letting h/w  	 * know there are new descriptors to fetch.  
(Only @@ -1615,16 +2131,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,  	 */  	wmb(); +	/* set next_to_watch value indicating a packet is present */ +	first->next_to_watch = tx_desc; + +	i++; +	if (i == tx_ring->count) +		i = 0; + +	tx_ring->next_to_use = i; + +	/* notify HW of packet */  	writel(i, tx_ring->tail); +  	return;  dma_error: -	dev_info(dev, "TX DMA map failed\n"); +	dev_info(tx_ring->dev, "TX DMA map failed\n");  	/* clear dma mappings for failed tx_bi map */  	for (;;) {  		tx_bi = &tx_ring->tx_bi[i]; -		i40e_unmap_tx_resource(tx_ring, tx_bi); +		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);  		if (tx_bi == first)  			break;  		if (i == 0) @@ -1632,8 +2159,6 @@ dma_error:  		i--;  	} -	dev_kfree_skb_any(skb); -  	tx_ring->next_to_use = i;  } @@ -1647,6 +2172,7 @@ dma_error:  static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)  {  	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); +	/* Memory barrier before checking head and tail */  	smp_mb();  	/* Check again in a case another CPU has just made room available. 
*/ @@ -1685,25 +2211,20 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)  static int i40e_xmit_descriptor_count(struct sk_buff *skb,  				      struct i40e_ring *tx_ring)  { -#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD  	unsigned int f; -#endif  	int count = 0;  	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,  	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, -	 *       + 2 desc gap to keep tail from touching head, +	 *       + 4 desc gap to avoid the cache line where head is,  	 *       + 1 desc for context descriptor,  	 * otherwise try next time  	 */ -#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD  	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)  		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); -#else -	count += skb_shinfo(skb)->nr_frags; -#endif +  	count += TXD_USE_COUNT(skb_headlen(skb)); -	if (i40e_maybe_stop_tx(tx_ring, count + 3)) { +	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {  		tx_ring->tx_stats.tx_busy++;  		return 0;  	} @@ -1728,6 +2249,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,  	__be16 protocol;  	u32 td_cmd = 0;  	u8 hdr_len = 0; +	int tsyn;  	int tso;  	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))  		return NETDEV_TX_BUSY; @@ -1743,9 +2265,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,  	first = &tx_ring->tx_bi[tx_ring->next_to_use];  	/* setup IPv4/IPv6 offloads */ -	if (protocol == __constant_htons(ETH_P_IP)) +	if (protocol == htons(ETH_P_IP))  		tx_flags |= I40E_TX_FLAGS_IPV4; -	else if (protocol == __constant_htons(ETH_P_IPV6)) +	else if (protocol == htons(ETH_P_IPV6))  		tx_flags |= I40E_TX_FLAGS_IPV6;  	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, @@ -1758,16 +2280,21 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,  	skb_tx_timestamp(skb); -	/* Always offload the checksum, since it's in the data descriptor */ -	if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol)) -		tx_flags |= I40E_TX_FLAGS_CSUM; +	tsyn = 
i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); + +	if (tsyn) +		tx_flags |= I40E_TX_FLAGS_TSYN; -	/* always enable offload insertion */ +	/* always enable CRC insertion offload */  	td_cmd |= I40E_TX_DESC_CMD_ICRC; -	if (tx_flags & I40E_TX_FLAGS_CSUM) +	/* Always offload the checksum, since it's in the data descriptor */ +	if (skb->ip_summed == CHECKSUM_PARTIAL) { +		tx_flags |= I40E_TX_FLAGS_CSUM; +  		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,  				    tx_ring, &cd_tunneling); +	}  	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,  			   cd_tunneling, cd_l2tag2); @@ -1801,7 +2328,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev);  	struct i40e_vsi *vsi = np->vsi; -	struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping]; +	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];  	/* hardware can't handle really short frames, hardware padding works  	 * beyond this point diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index b1d7722d98a..0277894fe1c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -25,11 +24,13 @@   *   ******************************************************************************/ -/* Interrupt Throttling and Rate Limiting (storm control) Goodies */ +#ifndef _I40E_TXRX_H_ +#define _I40E_TXRX_H_ -#define I40E_MAX_ITR               0x07FF -#define I40E_MIN_ITR               0x0001 -#define I40E_ITR_USEC_RESOLUTION   2 +/* Interrupt Throttling and Rate Limiting Goodies */ + +#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */ +#define I40E_MIN_ITR               0x0004  /* reg uses 2 usec resolution */  #define I40E_MAX_IRATE             0x03F  #define I40E_MIN_IRATE             0x001  #define I40E_IRATE_USEC_RESOLUTION 4 @@ -49,10 +50,38 @@  #define I40E_QUEUE_END_OF_LIST 0x7FF -#define I40E_ITR_NONE  3 -#define I40E_RX_ITR    0 -#define I40E_TX_ITR    1 -#define I40E_PE_ITR    2 +/* this enum matches hardware bits and is meant to be used by DYN_CTLN + * registers and QINT registers or more generally anywhere in the manual + * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any + * register but instead is a special value meaning "don't update" ITR0/1/2. 
+ */ +enum i40e_dyn_idx_t { +	I40E_IDX_ITR0 = 0, +	I40E_IDX_ITR1 = 1, +	I40E_IDX_ITR2 = 2, +	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */ +}; + +/* these are indexes into ITRN registers */ +#define I40E_RX_ITR    I40E_IDX_ITR0 +#define I40E_TX_ITR    I40E_IDX_ITR1 +#define I40E_PE_ITR    I40E_IDX_ITR2 + +/* Supported RSS offloads */ +#define I40E_DEFAULT_RSS_HENA ( \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ +	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ +	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ +	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \ +	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD)) +  /* Supported Rx Buffer Sizes */  #define I40E_RXBUFFER_512   512    /* Used for packet split */  #define I40E_RXBUFFER_2048  2048 @@ -88,11 +117,11 @@  #define i40e_rx_desc i40e_32byte_rx_desc  #define I40E_MIN_TX_LEN		17 -#define I40E_MAX_DATA_PER_TXD	16383	/* aka 16kB - 1 */ +#define I40E_MAX_DATA_PER_TXD	8192  /* Tx Descriptors needed, worst case */  #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) -#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4)  #define I40E_TX_FLAGS_CSUM		(u32)(1)  #define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1) @@ -102,23 +131,21 @@  #define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)  #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)  #define I40E_TX_FLAGS_FSO		(u32)(1 << 7) -#define I40E_TX_FLAGS_TXSW		(u32)(1 << 8) -#define I40E_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 9) +#define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)  #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000  #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000  
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29  #define I40E_TX_FLAGS_VLAN_SHIFT	16  struct i40e_tx_buffer { -	struct sk_buff *skb; -	dma_addr_t dma; -	unsigned long time_stamp; -	u16 length; -	u32 tx_flags;  	struct i40e_tx_desc *next_to_watch; +	unsigned long time_stamp; +	struct sk_buff *skb;  	unsigned int bytecount; -	u16 gso_segs; -	u8 mapped_as_page; +	unsigned short gso_segs; +	DEFINE_DMA_UNMAP_ADDR(dma); +	DEFINE_DMA_UNMAP_LEN(len); +	u32 tx_flags;  };  struct i40e_rx_buffer { @@ -129,21 +156,21 @@ struct i40e_rx_buffer {  	unsigned int page_offset;  }; -struct i40e_tx_queue_stats { +struct i40e_queue_stats {  	u64 packets;  	u64 bytes; +}; + +struct i40e_tx_queue_stats {  	u64 restart_queue;  	u64 tx_busy; -	u64 completed;  	u64 tx_done_old;  };  struct i40e_rx_queue_stats { -	u64 packets; -	u64 bytes;  	u64 non_eop_descs; -	u64 alloc_rx_page_failed; -	u64 alloc_rx_buff_failed; +	u64 alloc_page_failed; +	u64 alloc_buff_failed;  };  enum i40e_ring_state_t { @@ -152,7 +179,6 @@ enum i40e_ring_state_t {  	__I40E_TX_DETECT_HANG,  	__I40E_HANG_CHECK_ARMED,  	__I40E_RX_PS_ENABLED, -	__I40E_RX_LRO_ENABLED,  	__I40E_RX_16BYTE_DESC_ENABLED,  }; @@ -168,12 +194,6 @@ enum i40e_ring_state_t {  	set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)  #define clear_check_for_tx_hang(ring) \  	clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state) -#define ring_is_lro_enabled(ring) \ -	test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state) -#define set_ring_lro_enabled(ring) \ -	set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state) -#define clear_ring_lro_enabled(ring) \ -	clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)  #define ring_is_16byte_desc_enabled(ring) \  	test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)  #define set_ring_16byte_desc_enabled(ring) \ @@ -183,6 +203,7 @@ enum i40e_ring_state_t {  /* struct that defines a descriptor ring, associated with a VSI */  struct i40e_ring { +	struct i40e_ring *next;		/* pointer to next ring in q_vector */  	void *desc;			/* Descriptor ring memory 
*/  	struct device *dev;		/* Used for DMA mapping */  	struct net_device *netdev;	/* netdev ring maps to */ @@ -216,9 +237,13 @@ struct i40e_ring {  	u8 atr_sample_rate;  	u8 atr_count; +	unsigned long last_rx_timestamp; +  	bool ring_active;		/* is ring online or not */  	/* stats structs */ +	struct i40e_queue_stats	stats; +	struct u64_stats_sync syncp;  	union {  		struct i40e_tx_queue_stats tx_stats;  		struct i40e_rx_queue_stats rx_stats; @@ -229,6 +254,8 @@ struct i40e_ring {  	struct i40e_vsi *vsi;		/* Backreference to associated VSI */  	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */ + +	struct rcu_head rcu;		/* to avoid race on free */  } ____cacheline_internodealigned_in_smp;  enum i40e_latency_range { @@ -238,9 +265,8 @@ enum i40e_latency_range {  };  struct i40e_ring_container { -#define I40E_MAX_RINGPAIR_PER_VECTOR 8  	/* array of pointers to rings */ -	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR]; +	struct i40e_ring *ring;  	unsigned int total_bytes;	/* total bytes processed this int */  	unsigned int total_packets;	/* total packets processed this int */  	u16 count; @@ -248,6 +274,10 @@ struct i40e_ring_container {  	u16 itr;  }; +/* iterator for handling rings in ring container */ +#define i40e_for_each_ring(pos, head) \ +	for (pos = (head).ring; pos != NULL; pos = pos->next) +  void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);  netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);  void i40e_clean_tx_ring(struct i40e_ring *tx_ring); @@ -257,3 +287,4 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);  void i40e_free_tx_resources(struct i40e_ring *tx_ring);  void i40e_free_rx_resources(struct i40e_ring *rx_ring);  int i40e_napi_poll(struct napi_struct *napi, int budget); +#endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index f3f22b20f02..9d39ff23c5f 100644 --- 
a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -36,47 +35,36 @@  #include "i40e_lan_hmc.h"  /* Device IDs */ -#define I40E_SFP_XL710_DEVICE_ID	0x1572 -#define I40E_SFP_X710_DEVICE_ID		0x1573 -#define I40E_QEMU_DEVICE_ID		0x1574 -#define I40E_KX_A_DEVICE_ID		0x157F -#define I40E_KX_B_DEVICE_ID		0x1580 -#define I40E_KX_C_DEVICE_ID		0x1581 -#define I40E_KX_D_DEVICE_ID		0x1582 -#define I40E_QSFP_A_DEVICE_ID		0x1583 -#define I40E_QSFP_B_DEVICE_ID		0x1584 -#define I40E_QSFP_C_DEVICE_ID		0x1585 -#define I40E_VF_DEVICE_ID		0x154C -#define I40E_VF_HV_DEVICE_ID		0x1571 - -#define I40E_FW_API_VERSION_MAJOR  0x0001 -#define I40E_FW_API_VERSION_MINOR  0x0000 +#define I40E_DEV_ID_SFP_XL710		0x1572 +#define I40E_DEV_ID_QEMU		0x1574 +#define I40E_DEV_ID_KX_A		0x157F +#define I40E_DEV_ID_KX_B		0x1580 +#define I40E_DEV_ID_KX_C		0x1581 +#define I40E_DEV_ID_QSFP_A		0x1583 +#define I40E_DEV_ID_QSFP_B		0x1584 +#define I40E_DEV_ID_QSFP_C		0x1585 +#define I40E_DEV_ID_VF			0x154C +#define I40E_DEV_ID_VF_HV		0x1571 + +#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \ +					 (d) == I40E_DEV_ID_QSFP_B  || \ +					 (d) == I40E_DEV_ID_QSFP_C)  #define I40E_MAX_VSI_QP			16  #define I40E_MAX_VF_VSI			3  #define I40E_MAX_CHAINED_RX_BUFFERS	5 +#define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16  /* Max default timeout in ms, */  #define I40E_MAX_NVM_TIMEOUT		18000 -/* Check whether address is multicast.  This is little-endian specific check.*/ -#define I40E_IS_MULTICAST(address)	\ -	(bool)(((u8 *)(address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. 
*/ -#define I40E_IS_BROADCAST(address)	\ -	((((u8 *)(address))[0] == ((u8)0xff)) && \ -	(((u8 *)(address))[1] == ((u8)0xff))) - -/* Switch from mc to the 2usec global time (this is the GTIME resolution) */ -#define I40E_MS_TO_GTIME(time)		(((time) * 1000) / 2) +/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ +#define I40E_MS_TO_GTIME(time)		((time) * 1000)  /* forward declaration */  struct i40e_hw;  typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); -#define I40E_ETH_LENGTH_OF_ADDRESS	6 -  /* Data type manipulation macros. */  #define I40E_DESC_UNUSED(R)	\ @@ -85,9 +73,10 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);  /* bitfields for Tx queue mapping in QTX_CTL */  #define I40E_QTX_CTL_VF_QUEUE	0x0 +#define I40E_QTX_CTL_VM_QUEUE	0x1  #define I40E_QTX_CTL_PF_QUEUE	0x2 -/* debug masks */ +/* debug masks - set these bits in hw->debug_mask to control output */  enum i40e_debug_mask {  	I40E_DEBUG_INIT			= 0x00000001,  	I40E_DEBUG_RELEASE		= 0x00000002, @@ -100,11 +89,12 @@ enum i40e_debug_mask {  	I40E_DEBUG_FLOW			= 0x00000200,  	I40E_DEBUG_DCB			= 0x00000400,  	I40E_DEBUG_DIAG			= 0x00000800, +	I40E_DEBUG_FD			= 0x00001000, -	I40E_DEBUG_AQ_MESSAGE		= 0x01000000, /* for i40e_debug() */ +	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,  	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,  	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000, -	I40E_DEBUG_AQ_COMMAND		= 0x06000000, /* for i40e_debug_aq() */ +	I40E_DEBUG_AQ_COMMAND		= 0x06000000,  	I40E_DEBUG_AQ			= 0x0F000000,  	I40E_DEBUG_USER			= 0xF0000000, @@ -134,6 +124,7 @@ enum i40e_media_type {  	I40E_MEDIA_TYPE_BASET,  	I40E_MEDIA_TYPE_BACKPLANE,  	I40E_MEDIA_TYPE_CX4, +	I40E_MEDIA_TYPE_DA,  	I40E_MEDIA_TYPE_VIRTUAL  }; @@ -171,8 +162,12 @@ struct i40e_link_status {  	u8 link_info;  	u8 an_info;  	u8 ext_info; +	u8 loopback;  	/* is Link Status Event notification to SW enabled */  	bool lse_enable; +	u16 max_frame_size; +	bool crc_enable; +	u8 pacing;  
};  struct i40e_phy_info { @@ -236,9 +231,9 @@ struct i40e_hw_capabilities {  struct i40e_mac_info {  	enum i40e_mac_type type; -	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; -	u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS]; -	u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS]; +	u8 addr[ETH_ALEN]; +	u8 perm_addr[ETH_ALEN]; +	u8 san_addr[ETH_ALEN];  	u16 max_fcoeq;  }; @@ -415,6 +410,7 @@ struct i40e_driver_version {  	u8 minor_version;  	u8 build_version;  	u8 subbuild_version; +	u8 driver_string[32];  };  /* RX Descriptors */ @@ -465,6 +461,10 @@ union i40e_32byte_rx_desc {  			union {  				__le32 rss; /* RSS Hash */  				__le32 fcoe_param; /* FCoE DDP Context id */ +				/* Flow director filter id in case of +				 * Programming status desc WB +				 */ +				__le32 fd_id;  			} hi_dword;  		} qword0;  		struct { @@ -490,9 +490,6 @@ union i40e_32byte_rx_desc {  	} wb;  /* writeback */  }; -#define I40E_RXD_QW1_STATUS_SHIFT	0 -#define I40E_RXD_QW1_STATUS_MASK	(0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT) -  enum i40e_rx_desc_status_bits {  	/* Note: These are predefined bit offsets */  	I40E_RX_DESC_STATUS_DD_SHIFT		= 0, @@ -500,18 +497,31 @@ enum i40e_rx_desc_status_bits {  	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,  	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,  	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4, -	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 3 BITS */ +	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */ +	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,  	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,  	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */  	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,  	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */ -	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14 +	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14, +	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15, +	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */ +	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18, +	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! 
*/  }; +#define I40E_RXD_QW1_STATUS_SHIFT	0 +#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \ +					 << I40E_RXD_QW1_STATUS_SHIFT) +  #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x7UL << \ +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \  					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \ +					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) +  enum i40e_rx_desc_fltstat_values {  	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,  	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */ @@ -531,7 +541,8 @@ enum i40e_rx_desc_error_bits {  	I40E_RX_DESC_ERROR_IPE_SHIFT		= 3,  	I40E_RX_DESC_ERROR_L4E_SHIFT		= 4,  	I40E_RX_DESC_ERROR_EIPE_SHIFT		= 5, -	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6 +	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6, +	I40E_RX_DESC_ERROR_PPRS_SHIFT		= 7  };  enum i40e_rx_desc_error_l3l4e_fcoe_masks { @@ -547,28 +558,32 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {  /* Packet type non-ip values */  enum i40e_rx_l2_ptype { -	I40E_RX_PTYPE_L2_RESERVED		= 0, -	I40E_RX_PTYPE_L2_MAC_PAY2		= 1, -	I40E_RX_PTYPE_L2_TIMESYNC_PAY2		= 2, -	I40E_RX_PTYPE_L2_FIP_PAY2		= 3, -	I40E_RX_PTYPE_L2_OUI_PAY2		= 4, -	I40E_RX_PTYPE_L2_MACCNTRL_PAY2		= 5, -	I40E_RX_PTYPE_L2_LLDP_PAY2		= 6, -	I40E_RX_PTYPE_L2_ECP_PAY2		= 7, -	I40E_RX_PTYPE_L2_EVB_PAY2		= 8, -	I40E_RX_PTYPE_L2_QCN_PAY2		= 9, -	I40E_RX_PTYPE_L2_EAPOL_PAY2		= 10, -	I40E_RX_PTYPE_L2_ARP			= 11, -	I40E_RX_PTYPE_L2_FCOE_PAY3		= 12, -	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3	= 13, -	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3	= 14, -	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3	= 15, -	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA	= 16, -	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3		= 17, -	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA	= 18, -	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY		= 19, -	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP		= 20, -	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER	= 21 +	
I40E_RX_PTYPE_L2_RESERVED			= 0, +	I40E_RX_PTYPE_L2_MAC_PAY2			= 1, +	I40E_RX_PTYPE_L2_TIMESYNC_PAY2			= 2, +	I40E_RX_PTYPE_L2_FIP_PAY2			= 3, +	I40E_RX_PTYPE_L2_OUI_PAY2			= 4, +	I40E_RX_PTYPE_L2_MACCNTRL_PAY2			= 5, +	I40E_RX_PTYPE_L2_LLDP_PAY2			= 6, +	I40E_RX_PTYPE_L2_ECP_PAY2			= 7, +	I40E_RX_PTYPE_L2_EVB_PAY2			= 8, +	I40E_RX_PTYPE_L2_QCN_PAY2			= 9, +	I40E_RX_PTYPE_L2_EAPOL_PAY2			= 10, +	I40E_RX_PTYPE_L2_ARP				= 11, +	I40E_RX_PTYPE_L2_FCOE_PAY3			= 12, +	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3		= 13, +	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3		= 14, +	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3		= 15, +	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA		= 16, +	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3			= 17, +	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA		= 18, +	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY			= 19, +	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP			= 20, +	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER		= 21, +	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58, +	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87, +	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124, +	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153  };  struct i40e_rx_ptype_decoded { @@ -648,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {  	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT	= 1,  	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT	= 2, /* 2 BITS */  	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT	= 4, /* 2 BITS */ -	I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT	= 6, /* 3 BITS */  	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT	= 9,  	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT	= 10,  	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT	= 11, @@ -693,7 +707,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {  enum i40e_rx_prog_status_desc_error_bits {  	/* Note: These are predefined bit offsets */  	I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT	= 0, -	I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT	= 1, +	I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT	= 1,  	I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT	= 2,  	I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT	= 3  }; @@ -852,22 +866,14 @@ struct i40e_filter_program_desc {  /* Packet Classifier Types for filters */  enum 
i40e_filter_pctype { -	/* Note: Value 0-25 are reserved for future use */ -	I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP		= 26, -	I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP		= 27, -	I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP		= 28, -	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29, -	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30, +	/* Note: Values 0-30 are reserved for future use */  	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31, -	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN		= 32, +	/* Note: Value 32 is reserved for future use */  	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,  	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,  	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,  	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36, -	/* Note: Value 37 is reserved for future use */ -	I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP		= 38, -	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39, -	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40, +	/* Note: Values 37-40 are reserved for future use */  	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,  	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN		= 42,  	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43, @@ -877,7 +883,8 @@ enum i40e_filter_pctype {  	/* Note: Value 47 is reserved for future use */  	I40E_FILTER_PCTYPE_FCOE_OX			= 48,  	I40E_FILTER_PCTYPE_FCOE_RX			= 49, -	/* Note: Value 50-62 are reserved for future use */ +	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50, +	/* Note: Values 51-62 are reserved for future use */  	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,  }; @@ -948,6 +955,16 @@ struct i40e_vsi_context {  	struct i40e_aqc_vsi_properties_data info;  }; +struct i40e_veb_context { +	u16 seid; +	u16 uplink_seid; +	u16 veb_number; +	u16 vebs_allocated; +	u16 vebs_unallocated; +	u16 flags; +	struct i40e_aqc_get_veb_parameters_completion info; +}; +  /* Statistics collected by each port, VSI, VEB, and S-channel */  struct i40e_eth_stats {  	u64 rx_bytes;			/* gorc */ @@ -955,8 +972,6 @@ struct i40e_eth_stats {  	u64 rx_multicast;		/* mprc */  	u64 rx_broadcast;		/* bprc */  	u64 rx_discards;		/* rdpc */ -	u64 rx_errors;			/* repc */ -	u64 
rx_missed;			/* rmpc */  	u64 rx_unknown_protocol;	/* rupp */  	u64 tx_bytes;			/* gotc */  	u64 tx_unicast;			/* uptc */ @@ -1008,12 +1023,21 @@ struct i40e_hw_port_stats {  	u64 tx_size_big;		/* ptc9522 */  	u64 mac_short_packet_dropped;	/* mspdc */  	u64 checksum_error;		/* xec */ +	/* flow director stats */ +	u64 fd_atr_match; +	u64 fd_sb_match; +	/* EEE LPI */ +	u32 tx_lpi_status; +	u32 rx_lpi_status; +	u64 tx_lpi_count;		/* etlpic */ +	u64 rx_lpi_count;		/* erlpic */  };  /* Checksum and Shadow RAM pointers */  #define I40E_SR_NVM_CONTROL_WORD		0x00  #define I40E_SR_EMP_MODULE_PTR			0x0F  #define I40E_SR_NVM_IMAGE_VERSION		0x18 +#define I40E_SR_NVM_WAKE_ON_LAN			0x19  #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27  #define I40E_SR_NVM_EETRACK_LO			0x2D  #define I40E_SR_NVM_EETRACK_HI			0x2E @@ -1138,17 +1162,4 @@ enum i40e_reset_type {  	I40E_RESET_GLOBR	= 2,  	I40E_RESET_EMPR		= 3,  }; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR		0xF -struct i40e_lldp_variables { -	u16 length; -	u16 adminstatus; -	u16 msgfasttx; -	u16 msgtxinterval; -	u16 txparams; -	u16 timers; -	u16 crc8; -}; -  #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index cc6654f1dac..70951d2edca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -142,7 +141,7 @@ struct i40e_virtchnl_vsi_resource {  	u16 num_queue_pairs;  	enum i40e_vsi_type vsi_type;  	u16 qset_handle; -	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS]; +	u8 default_mac_addr[ETH_ALEN];  };  /* VF offload flags */  #define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001 @@ -265,7 +264,7 @@ struct i40e_virtchnl_queue_select {   */  struct i40e_virtchnl_ether_addr { -	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; +	u8 addr[ETH_ALEN];  	u8 pad[2];  }; @@ -342,10 +341,6 @@ struct i40e_virtchnl_pf_event {  	int severity;  }; -/* The following are TBD, not necessary for LAN functionality. - * I40E_VIRTCHNL_OP_FCOE - */ -  /* VF reset states - these are written into the RSTAT register:   * I40E_VFGEN_RSTAT1 on the PF   * I40E_VFGEN_RSTAT on the VF diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 8967e58e240..f5b9d206257 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". @@ -30,6 +29,24 @@  /***********************misc routines*****************************/  /** + * i40e_vc_disable_vf + * @pf: pointer to the pf info + * @vf: pointer to the vf info + * + * Disable the VF through a SW reset + **/ +static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) +{ +	struct i40e_hw *hw = &pf->hw; +	u32 reg; + +	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); +	reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; +	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); +	i40e_flush(hw); +} + +/**   * i40e_vc_isvalid_vsi_id   * @vf: pointer to the vf info   * @vsi_id: vf relative vsi id @@ -102,130 +119,6 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,  }  /** - * i40e_ctrl_vsi_tx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct - * @vsi_queue_id: vsi relative queue index - * @ctrl: control flags - * - * enable/disable/enable check/disable check - **/ -static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, -				  u16 vsi_queue_id, -				  enum i40e_queue_ctrl ctrl) -{ -	struct i40e_pf *pf = vf->pf; -	struct i40e_hw *hw = &pf->hw; -	bool writeback = false; -	u16 pf_queue_id; -	int ret = 0; -	u32 reg; - -	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); -	reg = rd32(hw, I40E_QTX_ENA(pf_queue_id)); - -	switch (ctrl) { -	case I40E_QUEUE_CTRL_ENABLE: -		reg |= I40E_QTX_ENA_QENA_REQ_MASK; -		writeback = true; -		break; -	case I40E_QUEUE_CTRL_ENABLECHECK: -		
ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM; -		break; -	case I40E_QUEUE_CTRL_DISABLE: -		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; -		writeback = true; -		break; -	case I40E_QUEUE_CTRL_DISABLECHECK: -		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0; -		break; -	case I40E_QUEUE_CTRL_FASTDISABLE: -		reg |= I40E_QTX_ENA_FAST_QDIS_MASK; -		writeback = true; -		break; -	case I40E_QUEUE_CTRL_FASTDISABLECHECK: -		ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0; -		if (!ret) { -			reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK; -			writeback = true; -		} -		break; -	default: -		ret = -EINVAL; -		break; -	} - -	if (writeback) { -		wr32(hw, I40E_QTX_ENA(pf_queue_id), reg); -		i40e_flush(hw); -	} - -	return ret; -} - -/** - * i40e_ctrl_vsi_rx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct - * @vsi_queue_id: vsi relative queue index - * @ctrl: control flags - * - * enable/disable/enable check/disable check - **/ -static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, -				  u16 vsi_queue_id, -				  enum i40e_queue_ctrl ctrl) -{ -	struct i40e_pf *pf = vf->pf; -	struct i40e_hw *hw = &pf->hw; -	bool writeback = false; -	u16 pf_queue_id; -	int ret = 0; -	u32 reg; - -	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); -	reg = rd32(hw, I40E_QRX_ENA(pf_queue_id)); - -	switch (ctrl) { -	case I40E_QUEUE_CTRL_ENABLE: -		reg |= I40E_QRX_ENA_QENA_REQ_MASK; -		writeback = true; -		break; -	case I40E_QUEUE_CTRL_ENABLECHECK: -		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM; -		break; -	case I40E_QUEUE_CTRL_DISABLE: -		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; -		writeback = true; -		break; -	case I40E_QUEUE_CTRL_DISABLECHECK: -		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0; -		break; -	case I40E_QUEUE_CTRL_FASTDISABLE: -		reg |= I40E_QRX_ENA_FAST_QDIS_MASK; -		writeback = true; -		break; -	case I40E_QUEUE_CTRL_FASTDISABLECHECK: -		ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 
-EPERM : 0; -		if (!ret) { -			reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK; -			writeback = true; -		} -		break; -	default: -		ret = -EINVAL; -		break; -	} - -	if (writeback) { -		wr32(hw, I40E_QRX_ENA(pf_queue_id), reg); -		i40e_flush(hw); -	} - -	return ret; -} - -/**   * i40e_config_irq_link_list   * @vf: pointer to the vf info   * @vsi_idx: index of VSI in PF struct @@ -251,8 +144,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,  		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);  	else  		reg_idx = I40E_VPINT_LNKLSTN( -			    ((pf->hw.func_caps.num_msix_vectors_vf - 1) -					      * vf->vf_id) + (vector_id - 1)); +		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + +		     (vector_id - 1));  	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {  		/* Special case - No queues mapped on this vector */ @@ -260,23 +153,17 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,  		goto irq_list_done;  	}  	tempmap = vecmap->rxq_map; -	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (vsi_queue_id < I40E_MAX_VSI_QP) { +	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {  		linklistmap |= (1 <<  				(I40E_VIRTCHNL_SUPPORTED_QTYPES *  				 vsi_queue_id)); -		vsi_queue_id = -		    find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);  	}  	tempmap = vecmap->txq_map; -	vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (vsi_queue_id < I40E_MAX_VSI_QP) { +	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {  		linklistmap |= (1 <<  				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id  				 + 1)); -		vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					     vsi_queue_id + 1);  	}  	next_q = find_first_bit(&linklistmap, @@ -307,7 +194,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,  				       (I40E_MAX_VSI_QP *  					I40E_VIRTCHNL_SUPPORTED_QTYPES),  				       next_q + 1); -		if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { 
+		if (next_q < +		    (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {  			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;  			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;  			pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, @@ -360,6 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,  	tx_ctx.qlen = info->ring_len;  	tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);  	tx_ctx.rdylist_act = 0; +	tx_ctx.head_wb_ena = info->headwb_enabled; +	tx_ctx.head_wb_addr = info->dma_headwb_addr;  	/* clear the context in the HMC */  	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); @@ -383,7 +273,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,  	/* associate this queue with the PCI VF function */  	qtx_ctl = I40E_QTX_CTL_VF_QUEUE; -	qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) +	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)  		    & I40E_QTX_CTL_PF_INDX_MASK);  	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)  		     << I40E_QTX_CTL_VFVM_INDX_SHIFT) @@ -463,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,  	rx_ctx.tphhead_ena = 1;  	rx_ctx.lrxqthresh = 2;  	rx_ctx.crcstrip = 1; +	rx_ctx.prefena = 1;  	/* clear the context in the HMC */  	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); @@ -499,7 +390,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)  {  	struct i40e_mac_filter *f = NULL;  	struct i40e_pf *pf = vf->pf; -	struct i40e_hw *hw = &pf->hw;  	struct i40e_vsi *vsi;  	int ret = 0; @@ -513,167 +403,47 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)  		goto error_alloc_vsi_res;  	}  	if (type == I40E_VSI_SRIOV) { +		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};  		vf->lan_vsi_index = vsi->idx;  		vf->lan_vsi_id = vsi->id;  		dev_info(&pf->pdev->dev, -			 "LAN VSI index %d, VSI id %d\n", -			 vsi->idx, vsi->id); +			 "VF %d assigned LAN 
VSI index %d, VSI id %d\n", +			 vf->vf_id, vsi->idx, vsi->id); +		/* If the port VLAN has been configured and then the +		 * VF driver was removed then the VSI port VLAN +		 * configuration was destroyed.  Check if there is +		 * a port VLAN and restore the VSI configuration if +		 * needed. +		 */ +		if (vf->port_vlan_id) +			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);  		f = i40e_add_filter(vsi, vf->default_lan_addr.addr, -				    0, true, false); -	} -	if (!f) { -		dev_err(&pf->pdev->dev, "Unable to add ucast filter\n"); -		ret = -ENOMEM; -		goto error_alloc_vsi_res; +				    vf->port_vlan_id, true, false); +		if (!f) +			dev_info(&pf->pdev->dev, +				 "Could not allocate VF MAC addr\n"); +		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id, +				    true, false); +		if (!f) +			dev_info(&pf->pdev->dev, +				 "Could not allocate VF broadcast filter\n");  	}  	/* program mac filter */  	ret = i40e_sync_vsi_filters(vsi); -	if (ret) { +	if (ret)  		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); -		goto error_alloc_vsi_res; -	} - -	/* accept bcast pkts. 
by default */ -	ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL); -	if (ret) { -		dev_err(&pf->pdev->dev, -			"set vsi bcast failed for vf %d, vsi %d, aq_err %d\n", -			vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status); -		ret = -EINVAL; -	} - -error_alloc_vsi_res: -	return ret; -} - -/** - * i40e_reset_vf - * @vf: pointer to the vf structure - * @flr: VFLR was issued or not - * - * reset the vf - **/ -int i40e_reset_vf(struct i40e_vf *vf, bool flr) -{ -	int ret = -ENOENT; -	struct i40e_pf *pf = vf->pf; -	struct i40e_hw *hw = &pf->hw; -	u32 reg, reg_idx, msix_vf; -	bool rsd = false; -	u16 pf_queue_id; -	int i, j; - -	/* warn the VF */ -	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS); - -	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); -	/* PF triggers VFR only when VF requests, in case of -	 * VFLR, HW triggers VFR -	 */ -	if (!flr) { -		/* reset vf using VPGEN_VFRTRIG reg */ -		reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK; -		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); -		i40e_flush(hw); -	} - -	/* poll VPGEN_VFRSTAT reg to make sure -	 * that reset is complete -	 */ -	for (i = 0; i < 4; i++) { -		/* vf reset requires driver to first reset the -		 * vf & than poll the status register to make sure -		 * that the requested op was completed -		 * successfully -		 */ -		udelay(10); -		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); -		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { -			rsd = true; -			break; -		} -	} - -	if (!rsd) -		dev_err(&pf->pdev->dev, "VF reset check timeout %d\n", -			vf->vf_id); - -	/* fast disable qps */ -	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { -		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j, -					     I40E_QUEUE_CTRL_FASTDISABLE); -		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j, -					     I40E_QUEUE_CTRL_FASTDISABLE); -	} - -	/* Queue enable/disable requires driver to -	 * first reset the vf & than poll the status register -	 * to make sure that the requested op was completed -	 * 
successfully -	 */ -	udelay(10); -	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { -		ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j, -					     I40E_QUEUE_CTRL_FASTDISABLECHECK); +	/* Set VF bandwidth if specified */ +	if (vf->tx_rate) { +		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, +						  vf->tx_rate / 50, 0, NULL);  		if (ret) -			dev_info(&pf->pdev->dev, -				 "Queue control check failed on Tx queue %d of VSI %d VF %d\n", -				 vf->lan_vsi_index, j, vf->vf_id); -		ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j, -					     I40E_QUEUE_CTRL_FASTDISABLECHECK); -		if (ret) -			dev_info(&pf->pdev->dev, -				 "Queue control check failed on Rx queue %d of VSI %d VF %d\n", -				 vf->lan_vsi_index, j, vf->vf_id); +			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", +				vf->vf_id, ret);  	} -	/* clear the irq settings */ -	msix_vf = pf->hw.func_caps.num_msix_vectors_vf; -	for (i = 0; i < msix_vf; i++) { -		/* format is same for both registers */ -		if (0 == i) -			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); -		else -			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * -						      (vf->vf_id)) -						     + (i - 1)); -		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | -		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); -		wr32(hw, reg_idx, reg); -		i40e_flush(hw); -	} -	/* disable interrupts so the VF starts in a known state */ -	for (i = 0; i < msix_vf; i++) { -		/* format is same for both registers */ -		if (0 == i) -			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); -		else -			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * -						      (vf->vf_id)) -						     + (i - 1)); -		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); -		i40e_flush(hw); -	} - -	/* set the defaults for the rqctl & tqctl registers */ -	reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK | -	       I40E_QINT_RQCTL_NEXTQ_TYPE_MASK); -	for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { -		pf_queue_id = 
i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); -		wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg); -		wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg); -	} - -	/* clear the reset bit in the VPGEN_VFRTRIG reg */ -	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); -	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; -	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); -	/* tell the VF the reset is done */ -	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); -	i40e_flush(hw); - +error_alloc_vsi_res:  	return ret;  } @@ -756,6 +526,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)  static void i40e_free_vf_res(struct i40e_vf *vf)  {  	struct i40e_pf *pf = vf->pf; +	struct i40e_hw *hw = &pf->hw; +	u32 reg_idx, reg; +	int i, msix_vf;  	/* free vsi & disconnect it from the parent uplink */  	if (vf->lan_vsi_index) { @@ -763,6 +536,35 @@ static void i40e_free_vf_res(struct i40e_vf *vf)  		vf->lan_vsi_index = 0;  		vf->lan_vsi_id = 0;  	} +	msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + +	/* disable interrupts so the VF starts in a known state */ +	for (i = 0; i < msix_vf; i++) { +		/* format is same for both registers */ +		if (0 == i) +			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id); +		else +			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) * +						      (vf->vf_id)) +						     + (i - 1)); +		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); +		i40e_flush(hw); +	} + +	/* clear the irq settings */ +	for (i = 0; i < msix_vf; i++) { +		/* format is same for both registers */ +		if (0 == i) +			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); +		else +			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) * +						      (vf->vf_id)) +						     + (i - 1)); +		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | +		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); +		wr32(hw, reg_idx, reg); +		i40e_flush(hw); +	}  	/* reset some of the state varibles keeping  	 * track of the resources  	 */ @@ -804,6 +606,111 @@ error_alloc:  	return ret;  } +#define VF_DEVICE_STATUS 0xAA +#define VF_TRANS_PENDING_MASK 
0x20 +/** + * i40e_quiesce_vf_pci + * @vf: pointer to the vf structure + * + * Wait for VF PCI transactions to be cleared after reset. Returns -EIO + * if the transactions never clear. + **/ +static int i40e_quiesce_vf_pci(struct i40e_vf *vf) +{ +	struct i40e_pf *pf = vf->pf; +	struct i40e_hw *hw = &pf->hw; +	int vf_abs_id, i; +	u32 reg; + +	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id; + +	wr32(hw, I40E_PF_PCI_CIAA, +	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); +	for (i = 0; i < 100; i++) { +		reg = rd32(hw, I40E_PF_PCI_CIAD); +		if ((reg & VF_TRANS_PENDING_MASK) == 0) +			return 0; +		udelay(1); +	} +	return -EIO; +} + +/** + * i40e_reset_vf + * @vf: pointer to the vf structure + * @flr: VFLR was issued or not + * + * reset the vf + **/ +void i40e_reset_vf(struct i40e_vf *vf, bool flr) +{ +	struct i40e_pf *pf = vf->pf; +	struct i40e_hw *hw = &pf->hw; +	bool rsd = false; +	int i; +	u32 reg; + +	/* warn the VF */ +	clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + +	/* In the case of a VFLR, the HW has already reset the VF and we +	 * just need to clean up, so don't hit the VFRTRIG register. 
+	 */ +	if (!flr) { +		/* reset vf using VPGEN_VFRTRIG reg */ +		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); +		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; +		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); +		i40e_flush(hw); +	} + +	if (i40e_quiesce_vf_pci(vf)) +		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", +			vf->vf_id); + +	/* poll VPGEN_VFRSTAT reg to make sure +	 * that reset is complete +	 */ +	for (i = 0; i < 100; i++) { +		/* vf reset requires driver to first reset the +		 * vf & than poll the status register to make sure +		 * that the requested op was completed +		 * successfully +		 */ +		udelay(10); +		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); +		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { +			rsd = true; +			break; +		} +	} + +	if (!rsd) +		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", +			vf->vf_id); +	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED); +	/* clear the reset bit in the VPGEN_VFRTRIG reg */ +	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); +	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; +	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); + +	/* On initial reset, we won't have any queues */ +	if (vf->lan_vsi_index == 0) +		goto complete_reset; + +	i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); +complete_reset: +	/* reallocate vf resources to reset the VSI state */ +	i40e_free_vf_res(vf); +	i40e_alloc_vf_res(vf); +	i40e_enable_vf_mappings(vf); +	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); + +	/* tell the VF the reset is done */ +	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); +	i40e_flush(hw); +} +  /**   * i40e_vfs_are_assigned   * @pf: pointer to the pf structure @@ -816,7 +723,7 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)  	struct pci_dev *vfdev;  	/* loop through all the VFs to see if we own any that are assigned */ -	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID , NULL); +	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL);  	while (vfdev) 
{  		/* if we don't own it we don't care */  		if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) { @@ -826,12 +733,82 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)  		}  		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, -				       I40E_VF_DEVICE_ID, +				       I40E_DEV_ID_VF,  				       vfdev);  	}  	return false;  } +#ifdef CONFIG_PCI_IOV + +/** + * i40e_enable_pf_switch_lb + * @pf: pointer to the pf structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ +	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; +	struct i40e_vsi_context ctxt; +	int aq_ret; + +	ctxt.seid = pf->main_vsi_seid; +	ctxt.pf_num = pf->hw.pf_id; +	ctxt.vf_num = 0; +	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); +	if (aq_ret) { +		dev_info(&pf->pdev->dev, +			 "%s couldn't get pf vsi config, err %d, aq_err %d\n", +			 __func__, aq_ret, pf->hw.aq.asq_last_status); +		return; +	} +	ctxt.flags = I40E_AQ_VSI_TYPE_PF; +	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); +	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + +	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); +	if (aq_ret) { +		dev_info(&pf->pdev->dev, +			 "%s: update vsi switch failed, aq_err=%d\n", +			 __func__, vsi->back->hw.aq.asq_last_status); +	} +} +#endif + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the pf structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ +	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; +	struct i40e_vsi_context ctxt; +	int aq_ret; + +	ctxt.seid = pf->main_vsi_seid; +	ctxt.pf_num = pf->hw.pf_id; +	ctxt.vf_num = 0; +	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); +	if (aq_ret) { +		dev_info(&pf->pdev->dev, +			 "%s couldn't get pf vsi config, err %d, aq_err %d\n", +			 __func__, aq_ret, pf->hw.aq.asq_last_status); +		return; +	} +	ctxt.flags 
= I40E_AQ_VSI_TYPE_PF; +	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); +	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + +	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); +	if (aq_ret) { +		dev_info(&pf->pdev->dev, +			 "%s: update vsi switch failed, aq_err=%d\n", +			 __func__, vsi->back->hw.aq.asq_last_status); +	} +}  /**   * i40e_free_vfs @@ -842,17 +819,20 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)  void i40e_free_vfs(struct i40e_pf *pf)  {  	struct i40e_hw *hw = &pf->hw; -	int i; +	u32 reg_idx, bit_idx; +	int i, tmp, vf_id;  	if (!pf->vf)  		return;  	/* Disable interrupt 0 so we don't try to handle the VFLR. */ -	wr32(hw, I40E_PFINT_DYN_CTL0, 0); -	i40e_flush(hw); +	i40e_irq_dynamic_disable_icr0(pf); +	mdelay(10); /* let any messages in transit get finished up */  	/* free up vf resources */ -	for (i = 0; i < pf->num_alloc_vfs; i++) { +	tmp = pf->num_alloc_vfs; +	pf->num_alloc_vfs = 0; +	for (i = 0; i < tmp; i++) {  		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))  			i40e_free_vf_res(&pf->vf[i]);  		/* disable qp mappings */ @@ -861,20 +841,29 @@ void i40e_free_vfs(struct i40e_pf *pf)  	kfree(pf->vf);  	pf->vf = NULL; -	pf->num_alloc_vfs = 0; -	if (!i40e_vfs_are_assigned(pf)) +	/* This check is for when the driver is unloaded while VFs are +	 * assigned. Setting the number of VFs to 0 through sysfs is caught +	 * before this function ever gets called. +	 */ +	if (!i40e_vfs_are_assigned(pf)) {  		pci_disable_sriov(pf->pdev); -	else +		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to +		 * work correctly when SR-IOV gets re-enabled. 
+		 */ +		for (vf_id = 0; vf_id < tmp; vf_id++) { +			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; +			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; +			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); +		} +		i40e_disable_pf_switch_lb(pf); +	} else {  		dev_warn(&pf->pdev->dev,  			 "unable to disable SR-IOV because VFs are assigned.\n"); +	}  	/* Re-enable interrupt 0. */ -	wr32(hw, I40E_PFINT_DYN_CTL0, -	     I40E_PFINT_DYN_CTL0_INTENA_MASK | -	     I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | -	     (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)); -	i40e_flush(hw); +	i40e_irq_dynamic_enable_icr0(pf);  }  #ifdef CONFIG_PCI_IOV @@ -885,25 +874,31 @@ void i40e_free_vfs(struct i40e_pf *pf)   *   * allocate vf resources   **/ -static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)  {  	struct i40e_vf *vfs;  	int i, ret = 0; -	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); -	if (ret) { -		dev_err(&pf->pdev->dev, -			"pci_enable_sriov failed with error %d!\n", ret); -		pf->num_alloc_vfs = 0; -		goto err_iov; -	} +	/* Disable interrupt 0 so we don't try to handle the VFLR. 
*/ +	i40e_irq_dynamic_disable_icr0(pf); +	/* Check to see if we're just allocating resources for extant VFs */ +	if (pci_num_vf(pf->pdev) != num_alloc_vfs) { +		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); +		if (ret) { +			dev_err(&pf->pdev->dev, +				"Failed to enable SR-IOV, error %d.\n", ret); +			pf->num_alloc_vfs = 0; +			goto err_iov; +		} +	}  	/* allocate memory */ -	vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL); +	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);  	if (!vfs) {  		ret = -ENOMEM;  		goto err_alloc;  	} +	pf->vf = vfs;  	/* apply default profile */  	for (i = 0; i < num_alloc_vfs; i++) { @@ -913,22 +908,22 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)  		/* assign default capabilities */  		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); - -		ret = i40e_alloc_vf_res(&vfs[i]); -		i40e_reset_vf(&vfs[i], true); -		if (ret) -			break; +		vfs[i].spoofchk = true; +		/* vf resources get allocated during reset */ +		i40e_reset_vf(&vfs[i], false);  		/* enable vf vplan_qtable mappings */  		i40e_enable_vf_mappings(&vfs[i]);  	} -	pf->vf = vfs;  	pf->num_alloc_vfs = num_alloc_vfs; +	i40e_enable_pf_switch_lb(pf);  err_alloc:  	if (ret)  		i40e_free_vfs(pf);  err_iov: +	/* Re-enable interrupt 0. 
*/ +	i40e_irq_dynamic_enable_icr0(pf);  	return ret;  } @@ -988,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)  	if (num_vfs)  		return i40e_pci_sriov_enable(pdev, num_vfs); -	i40e_free_vfs(pf); +	if (!i40e_vfs_are_assigned(pf)) { +		i40e_free_vfs(pf); +	} else { +		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); +		return -EINVAL; +	}  	return 0;  } @@ -1009,6 +1009,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,  {  	struct i40e_pf *pf = vf->pf;  	struct i40e_hw *hw = &pf->hw; +	int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;  	i40e_status aq_ret;  	/* single place to detect unsuccessful return values */ @@ -1028,8 +1029,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,  		vf->num_valid_msgs++;  	} -	aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, -				     msg, msglen, NULL); +	aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id,	v_opcode, v_retval, +					msg, msglen, NULL);  	if (aq_ret) {  		dev_err(&pf->pdev->dev,  			"Unable to send the message to VF %d aq_err %d\n", @@ -1144,12 +1145,10 @@ err:   * unlike other virtchnl messages, pf driver   * doesn't send the response back to the vf   **/ -static int i40e_vc_reset_vf_msg(struct i40e_vf *vf) +static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)  { -	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) -		return -ENOENT; - -	return i40e_reset_vf(vf, false); +	if (test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) +		i40e_reset_vf(vf, false);  }  /** @@ -1291,27 +1290,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  		/* lookout for the invalid queue index */  		tempmap = map->rxq_map; -		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -		while (vsi_queue_id < I40E_MAX_VSI_QP) { +		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {  			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,  						      vsi_queue_id)) {  				aq_ret = 
I40E_ERR_PARAM;  				goto error_param;  			} -			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -						     vsi_queue_id + 1);  		}  		tempmap = map->txq_map; -		vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -		while (vsi_queue_id < I40E_MAX_VSI_QP) { +		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {  			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,  						      vsi_queue_id)) {  				aq_ret = I40E_ERR_PARAM;  				goto error_param;  			} -			vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -						     vsi_queue_id + 1);  		}  		i40e_config_irq_link_list(vf, vsi_id, map); @@ -1337,8 +1330,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  	struct i40e_pf *pf = vf->pf;  	u16 vsi_id = vqs->vsi_id;  	i40e_status aq_ret = 0; -	unsigned long tempmap; -	u16 queue_id;  	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {  		aq_ret = I40E_ERR_PARAM; @@ -1354,66 +1345,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  		aq_ret = I40E_ERR_PARAM;  		goto error_param;  	} - -	tempmap = vqs->rx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { -			aq_ret = I40E_ERR_PARAM; -			goto error_param; -		} -		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, -				       I40E_QUEUE_CTRL_ENABLE); - -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - -	tempmap = vqs->tx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { -			aq_ret = I40E_ERR_PARAM; -			goto error_param; -		} -		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, -				       I40E_QUEUE_CTRL_ENABLE); - -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - -	/* Poll the status register to make sure that the -	 * requested op was completed successfully -	 */ -	
udelay(10); - -	tempmap = vqs->rx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, -					   I40E_QUEUE_CTRL_ENABLECHECK)) { -			dev_err(&pf->pdev->dev, -				"Queue control check failed on RX queue %d of VSI %d VF %d\n", -				queue_id, vsi_id, vf->vf_id); -		} -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - -	tempmap = vqs->tx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, -					   I40E_QUEUE_CTRL_ENABLECHECK)) { -			dev_err(&pf->pdev->dev, -				"Queue control check failed on TX queue %d of VSI %d VF %d\n", -				queue_id, vsi_id, vf->vf_id); -		} -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - +	if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) +		aq_ret = I40E_ERR_TIMEOUT;  error_param:  	/* send the response to the vf */  	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, @@ -1436,8 +1369,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  	struct i40e_pf *pf = vf->pf;  	u16 vsi_id = vqs->vsi_id;  	i40e_status aq_ret = 0; -	unsigned long tempmap; -	u16 queue_id;  	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {  		aq_ret = I40E_ERR_PARAM; @@ -1453,65 +1384,8 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  		aq_ret = I40E_ERR_PARAM;  		goto error_param;  	} - -	tempmap = vqs->rx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { -			aq_ret = I40E_ERR_PARAM; -			goto error_param; -		} -		i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, -				       I40E_QUEUE_CTRL_DISABLE); - -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - -	tempmap = vqs->tx_queues; -	
queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) { -			aq_ret = I40E_ERR_PARAM; -			goto error_param; -		} -		i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, -				       I40E_QUEUE_CTRL_DISABLE); - -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - -	/* Poll the status register to make sure that the -	 * requested op was completed successfully -	 */ -	udelay(10); - -	tempmap = vqs->rx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id, -					   I40E_QUEUE_CTRL_DISABLECHECK)) { -			dev_err(&pf->pdev->dev, -				"Queue control check failed on RX queue %d of VSI %d VF %d\n", -				queue_id, vsi_id, vf->vf_id); -		} -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} - -	tempmap = vqs->tx_queues; -	queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP); -	while (queue_id < I40E_MAX_VSI_QP) { -		if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id, -					   I40E_QUEUE_CTRL_DISABLECHECK)) { -			dev_err(&pf->pdev->dev, -				"Queue control check failed on TX queue %d of VSI %d VF %d\n", -				queue_id, vsi_id, vf->vf_id); -		} -		queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP, -					 queue_id + 1); -	} +	if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) +		aq_ret = I40E_ERR_TIMEOUT;  error_param:  	/* send the response to the vf */ @@ -1554,7 +1428,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  		goto error_param;  	}  	i40e_update_eth_stats(vsi); -	memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats)); +	stats = vsi->eth_stats;  error_param:  	/* send the response back to the vf */ @@ -1563,6 +1437,40 @@ error_param:  }  /** + * i40e_check_vf_permission + * @vf: pointer to the vf info + * @macaddr: pointer to the MAC Address being checked + * + * Check if the VF has permission 
to add or delete unicast MAC address + * filters and return error code -EPERM if not.  Then check if the + * address filter requested is broadcast or zero and if so return + * an invalid MAC address error code. + **/ +static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) +{ +	struct i40e_pf *pf = vf->pf; +	int ret = 0; + +	if (is_broadcast_ether_addr(macaddr) || +		   is_zero_ether_addr(macaddr)) { +		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); +		ret = I40E_ERR_INVALID_MAC_ADDR; +	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && +		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { +		/* If the host VMM administrator has set the VF MAC address +		 * administratively via the ndo_set_vf_mac command then deny +		 * permission to the VF to add or delete unicast MAC addresses. +		 * The VF may request to set the MAC address filter already +		 * assigned to it so do not return an error in that case. +		 */ +		dev_err(&pf->pdev->dev, +			"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); +		ret = -EPERM; +	} +	return ret; +} + +/**   * i40e_vc_add_mac_addr_msg   * @vf: pointer to the vf info   * @msg: pointer to the msg buffer @@ -1577,24 +1485,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL;  	u16 vsi_id = al->vsi_id; -	i40e_status aq_ret = 0; +	i40e_status ret = 0;  	int i;  	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||  	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||  	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { -		aq_ret = I40E_ERR_PARAM; +		ret = I40E_ERR_PARAM;  		goto error_param;  	}  	for (i = 0; i < al->num_elements; i++) { -		if (is_broadcast_ether_addr(al->list[i].addr) || -		    is_zero_ether_addr(al->list[i].addr)) { -			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n", -				al->list[i].addr); -			
aq_ret = I40E_ERR_PARAM; +		ret = i40e_check_vf_permission(vf, al->list[i].addr); +		if (ret)  			goto error_param; -		}  	}  	vsi = pf->vsi[vsi_id]; @@ -1603,7 +1507,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  		struct i40e_mac_filter *f;  		f = i40e_find_mac(vsi, al->list[i].addr, true, false); -		if (f) { +		if (!f) {  			if (i40e_is_vsi_in_vlan(vsi))  				f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,  							 true, false); @@ -1615,7 +1519,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  		if (!f) {  			dev_err(&pf->pdev->dev,  				"Unable to add VF MAC filter\n"); -			aq_ret = I40E_ERR_PARAM; +			ret = I40E_ERR_PARAM;  			goto error_param;  		}  	} @@ -1627,7 +1531,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  error_param:  	/* send the response to the vf */  	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, -				       aq_ret); +				       ret);  }  /** @@ -1645,15 +1549,25 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  	struct i40e_pf *pf = vf->pf;  	struct i40e_vsi *vsi = NULL;  	u16 vsi_id = al->vsi_id; -	i40e_status aq_ret = 0; +	i40e_status ret = 0;  	int i;  	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||  	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||  	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { -		aq_ret = I40E_ERR_PARAM; +		ret = I40E_ERR_PARAM;  		goto error_param;  	} + +	for (i = 0; i < al->num_elements; i++) { +		if (is_broadcast_ether_addr(al->list[i].addr) || +		    is_zero_ether_addr(al->list[i].addr)) { +			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", +				al->list[i].addr); +			ret = I40E_ERR_INVALID_MAC_ADDR; +			goto error_param; +		} +	}  	vsi = pf->vsi[vsi_id];  	/* delete addresses from the list */ @@ -1668,7 +1582,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)  error_param:  	/* send the response to the 
vf */  	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, -				       aq_ret); +				       ret);  }  /** @@ -1777,30 +1691,6 @@ error_param:  }  /** - * i40e_vc_fcoe_msg - * @vf: pointer to the vf info - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * called from the vf for the fcoe msgs - **/ -static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) -{ -	i40e_status aq_ret = 0; - -	if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || -	    !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) { -		aq_ret = I40E_ERR_PARAM; -		goto error_param; -	} -	aq_ret = I40E_ERR_NOT_IMPLEMENTED; - -error_param: -	/* send the response to the vf */ -	return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret); -} - -/**   * i40e_vc_validate_vf_msg   * @vf: pointer to the vf info   * @msg: pointer to the msg buffer @@ -1920,19 +1810,24 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,  int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,  			   u32 v_retval, u8 *msg, u16 msglen)  { -	struct i40e_vf *vf = &(pf->vf[vf_id]);  	struct i40e_hw *hw = &pf->hw; +	unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; +	struct i40e_vf *vf;  	int ret;  	pf->vf_aq_requests++; +	if (local_vf_id >= pf->num_alloc_vfs) +		return -EINVAL; +	vf = &(pf->vf[local_vf_id]);  	/* perform basic checks on the msg */  	ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);  	if (ret) { -		dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id); +		dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", +			local_vf_id, v_opcode, msglen);  		return ret;  	} -	wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE); +  	switch (v_opcode) {  	case I40E_VIRTCHNL_OP_VERSION:  		ret = i40e_vc_get_version_msg(vf); @@ -1941,7 +1836,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,  		ret = i40e_vc_get_vf_resources_msg(vf);  		break;  	case 
I40E_VIRTCHNL_OP_RESET_VF: -		ret = i40e_vc_reset_vf_msg(vf); +		i40e_vc_reset_vf_msg(vf); +		ret = 0;  		break;  	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:  		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); @@ -1973,13 +1869,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,  	case I40E_VIRTCHNL_OP_GET_STATS:  		ret = i40e_vc_get_stats_msg(vf, msg, msglen);  		break; -	case I40E_VIRTCHNL_OP_FCOE: -		ret = i40e_vc_fcoe_msg(vf, msg, msglen); -		break;  	case I40E_VIRTCHNL_OP_UNKNOWN:  	default: -		dev_err(&pf->pdev->dev, -			"Unsupported opcode %d from vf %d\n", v_opcode, vf_id); +		dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", +			v_opcode, local_vf_id);  		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,  					      I40E_ERR_NOT_IMPLEMENTED);  		break; @@ -2015,19 +1908,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)  			/* clear the bit in GLGEN_VFLRSTAT */  			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); -			if (i40e_reset_vf(vf, true)) -				dev_err(&pf->pdev->dev, -					"Unable to reset the VF %d\n", vf_id); -			/* free up vf resources to destroy vsi state */ -			i40e_free_vf_res(vf); - -			/* allocate new vf resources with the default state */ -			if (i40e_alloc_vf_res(vf)) -				dev_err(&pf->pdev->dev, -					"Unable to allocate VF resources %d\n", -					vf_id); - -			i40e_enable_vf_mappings(vf); +			if (!test_bit(__I40E_DOWN, &pf->state)) +				i40e_reset_vf(vf, true);  		}  	} @@ -2078,15 +1960,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,  void i40e_vc_notify_link_state(struct i40e_pf *pf)  {  	struct i40e_virtchnl_pf_event pfe; +	struct i40e_hw *hw = &pf->hw; +	struct i40e_vf *vf = pf->vf; +	struct i40e_link_status *ls = &pf->hw.phy.link_info; +	int i;  	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;  	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; -	pfe.event_data.link_event.link_status = -	    pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; -	
pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; - -	i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, -			     (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); +	for (i = 0; i < pf->num_alloc_vfs; i++) { +		if (vf->link_forced) { +			pfe.event_data.link_event.link_status = vf->link_up; +			pfe.event_data.link_event.link_speed = +				(vf->link_up ? I40E_LINK_SPEED_40GB : 0); +		} else { +			pfe.event_data.link_event.link_status = +				ls->link_info & I40E_AQ_LINK_UP; +			pfe.event_data.link_event.link_speed = ls->link_speed; +		} +		i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, +				       0, (u8 *)&pfe, sizeof(pfe), +				       NULL); +		vf++; +	}  }  /** @@ -2164,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)  	}  	/* delete the temporary mac address */ -	i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false); +	i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id, +			true, false); -	/* add the new mac address */ -	f = i40e_add_filter(vsi, mac, 0, true, false); -	if (!f) { -		dev_err(&pf->pdev->dev, -			"Unable to add VF ucast filter\n"); -		ret = -ENOMEM; -		goto error_param; -	} +	/* Delete all the filters for this VSI - we're going to kill it +	 * anyway. 
+	 */ +	list_for_each_entry(f, &vsi->mac_filter_list, list) +		i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);  	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);  	/* program mac filter */ @@ -2182,7 +2075,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)  		ret = -EIO;  		goto error_param;  	} -	memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN); +	ether_addr_copy(vf->default_lan_addr.addr, mac); +	vf->pf_set_mac = true;  	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");  	ret = 0; @@ -2229,6 +2123,30 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,  		goto error_pvid;  	} +	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) { +		dev_err(&pf->pdev->dev, +			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", +			vf_id); +		/* Administrator Error - knock the VF offline until he does +		 * the right thing by reconfiguring his network correctly +		 * and then reloading the VF driver. +		 */ +		i40e_vc_disable_vf(pf, vf); +	} + +	/* Check for condition where there was already a port VLAN ID +	 * filter set and now it is being deleted by setting it to zero. +	 * Additionally check for the condition where there was a port +	 * VLAN but now there is a new and different port VLAN being set. +	 * Before deleting all the old VLAN filters we must add new ones +	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our +	 * MAC addresses deleted. 
+	 */ +	if ((!(vlan_id || qos) || +	    (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) && +	    vsi->info.pvid) +		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY); +  	if (vsi->info.pvid) {  		/* kill old VLAN */  		ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & @@ -2243,7 +2161,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,  		ret = i40e_vsi_add_pvid(vsi,  				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));  	else -		i40e_vlan_stripping_disable(vsi); +		i40e_vsi_remove_pvid(vsi);  	if (vlan_id) {  		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", @@ -2257,18 +2175,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,  				 vsi->back->hw.aq.asq_last_status);  			goto error_pvid;  		} +		/* Kill non-vlan MAC filters - ignore error return since +		 * there might not be any non-vlan MAC filters. +		 */ +		i40e_vsi_kill_vlan(vsi, I40E_VLAN_ANY);  	}  	if (ret) {  		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");  		goto error_pvid;  	} +	/* The Port VLAN needs to be saved across resets the same as the +	 * default LAN MAC address. 
+	 */ +	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);  	ret = 0;  error_pvid:  	return ret;  } +#define I40E_BW_CREDIT_DIVISOR 50     /* 50Mbps per BW credit */ +#define I40E_MAX_BW_INACTIVE_ACCUM 4  /* device can accumulate 4 credits max */  /**   * i40e_ndo_set_vf_bw   * @netdev: network interface device structure @@ -2277,9 +2205,76 @@ error_pvid:   *   * configure vf tx rate   **/ -int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate) +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, +		       int max_tx_rate)  { -	return -EOPNOTSUPP; +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; +	struct i40e_vsi *vsi; +	struct i40e_vf *vf; +	int speed = 0; +	int ret = 0; + +	/* validate the request */ +	if (vf_id >= pf->num_alloc_vfs) { +		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); +		ret = -EINVAL; +		goto error; +	} + +	if (min_tx_rate) { +		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n", +			min_tx_rate, vf_id); +		return -EINVAL; +	} + +	vf = &(pf->vf[vf_id]); +	vsi = pf->vsi[vf->lan_vsi_index]; +	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { +		dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); +		ret = -EINVAL; +		goto error; +	} + +	switch (pf->hw.phy.link_info.link_speed) { +	case I40E_LINK_SPEED_40GB: +		speed = 40000; +		break; +	case I40E_LINK_SPEED_10GB: +		speed = 10000; +		break; +	case I40E_LINK_SPEED_1GB: +		speed = 1000; +		break; +	default: +		break; +	} + +	if (max_tx_rate > speed) { +		dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.", +			max_tx_rate, vf->vf_id); +		ret = -EINVAL; +		goto error; +	} + +	if ((max_tx_rate < 50) && (max_tx_rate > 0)) { +		dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); +		max_tx_rate = 50; +	} + +	/* Tx rate credits are in values of 50Mbps, 0 is disabled*/ +	ret = 
i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, +					  max_tx_rate / I40E_BW_CREDIT_DIVISOR, +					  I40E_MAX_BW_INACTIVE_ACCUM, NULL); +	if (ret) { +		dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", +			ret); +		ret = -EIO; +		goto error; +	} +	vf->tx_rate = max_tx_rate; +error: +	return ret;  }  /** @@ -2294,7 +2289,6 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,  			   int vf_id, struct ifla_vf_info *ivi)  {  	struct i40e_netdev_priv *np = netdev_priv(netdev); -	struct i40e_mac_filter *f, *ftmp;  	struct i40e_vsi *vsi = np->vsi;  	struct i40e_pf *pf = vsi->back;  	struct i40e_vf *vf; @@ -2318,18 +2312,130 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,  	ivi->vf = vf_id; -	/* first entry of the list is the default ethernet address */ -	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { -		memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS); -		break; -	} +	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN); -	ivi->tx_rate = 0; +	ivi->max_tx_rate = vf->tx_rate; +	ivi->min_tx_rate = 0;  	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;  	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>  		   I40E_VLAN_PRIORITY_SHIFT; +	if (vf->link_forced == false) +		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; +	else if (vf->link_up == true) +		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; +	else +		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +	ivi->spoofchk = vf->spoofchk;  	ret = 0;  error_param:  	return ret;  } + +/** + * i40e_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: vf identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_pf *pf = np->vsi->back; +	struct i40e_virtchnl_pf_event pfe; +	struct i40e_hw *hw = &pf->hw; +	struct 
i40e_vf *vf; +	int ret = 0; + +	/* validate the request */ +	if (vf_id >= pf->num_alloc_vfs) { +		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); +		ret = -EINVAL; +		goto error_out; +	} + +	vf = &pf->vf[vf_id]; + +	pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; +	pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + +	switch (link) { +	case IFLA_VF_LINK_STATE_AUTO: +		vf->link_forced = false; +		pfe.event_data.link_event.link_status = +			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; +		pfe.event_data.link_event.link_speed = +			pf->hw.phy.link_info.link_speed; +		break; +	case IFLA_VF_LINK_STATE_ENABLE: +		vf->link_forced = true; +		vf->link_up = true; +		pfe.event_data.link_event.link_status = true; +		pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; +		break; +	case IFLA_VF_LINK_STATE_DISABLE: +		vf->link_forced = true; +		vf->link_up = false; +		pfe.event_data.link_event.link_status = false; +		pfe.event_data.link_event.link_speed = 0; +		break; +	default: +		ret = -EINVAL; +		goto error_out; +	} +	/* Notify the VF of its new link state */ +	i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT, +			       0, (u8 *)&pfe, sizeof(pfe), NULL); + +error_out: +	return ret; +} + +/** + * i40e_ndo_set_vf_spoofchk + * @netdev: network interface device structure + * @vf_id: vf identifier + * @enable: flag to enable or disable feature + * + * Enable or disable VF spoof checking + **/ +int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable) +{ +	struct i40e_netdev_priv *np = netdev_priv(netdev); +	struct i40e_vsi *vsi = np->vsi; +	struct i40e_pf *pf = vsi->back; +	struct i40e_vsi_context ctxt; +	struct i40e_hw *hw = &pf->hw; +	struct i40e_vf *vf; +	int ret = 0; + +	/* validate the request */ +	if (vf_id >= pf->num_alloc_vfs) { +		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); +		ret = -EINVAL; +		goto out; +	} + +	vf = &(pf->vf[vf_id]); + +	if (enable == vf->spoofchk) +		goto out; + +	vf->spoofchk = enable; 
+	memset(&ctxt, 0, sizeof(ctxt)); +	ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid; +	ctxt.pf_num = pf->hw.pf_id; +	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); +	if (enable) +		ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; +	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); +	if (ret) { +		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", +			ret); +		ret = -EIO; +	} +out: +	return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 360382cf304..63e7e0d81ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,7 +1,7 @@  /*******************************************************************************   *   * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2013 - 2014 Intel Corporation.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms and conditions of the GNU General Public License, @@ -12,9 +12,8 @@   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for   * more details.   * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * You should have received a copy of the GNU General Public License along + * with this program.  If not, see <http://www.gnu.org/licenses/>.   *   * The full GNU General Public License is included in this distribution in   * the file called "COPYING". 
@@ -82,6 +81,8 @@ struct i40e_vf {  	struct i40e_virtchnl_ether_addr default_lan_addr;  	struct i40e_virtchnl_ether_addr default_fcoe_addr; +	u16 port_vlan_id; +	bool pf_set_mac;	/* The VMM admin set the VF MAC address */  	/* VSI indices - actual VSI pointers are maintained in the PF structure  	 * When assigned, these will be non-zero, because VSI 0 is always @@ -97,23 +98,32 @@ struct i40e_vf {  	unsigned long vf_caps;	/* vf's adv. capabilities */  	unsigned long vf_states;	/* vf's runtime states */ +	unsigned int tx_rate;	/* Tx bandwidth limit in Mbps */ +	bool link_forced; +	bool link_up;		/* only valid if vf link is forced */ +	bool spoofchk;  };  void i40e_free_vfs(struct i40e_pf *pf);  int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);  int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,  			   u32 v_retval, u8 *msg, u16 msglen);  int i40e_vc_process_vflr_event(struct i40e_pf *pf); -int i40e_reset_vf(struct i40e_vf *vf, bool flr); +void i40e_reset_vf(struct i40e_vf *vf, bool flr);  void i40e_vc_notify_vf_reset(struct i40e_vf *vf);  /* vf configuration related iplink handlers */  int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);  int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,  			      int vf_id, u16 vlan_id, u8 qos); -int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); +int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, +		       int max_tx_rate);  int i40e_ndo_get_vf_config(struct net_device *netdev,  			   int vf_id, struct ifla_vf_info *ivi); +int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); +int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable); +  void i40e_vc_notify_link_state(struct i40e_pf *pf);  void i40e_vc_notify_reset(struct i40e_pf *pf);  | 
