Diffstat (limited to 'drivers/net/ethernet/emulex')
-rw-r--r--  drivers/net/ethernet/emulex/benet/Kconfig      |    8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h         |  238
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c    | 1046
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h    |  517
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c |  333
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h      |   24
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c    | 1728
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c    |    8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h    |    5
9 files changed, 2553 insertions, 1354 deletions
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig index 231129dd176..ea94a8eb6b3 100644 --- a/drivers/net/ethernet/emulex/benet/Kconfig +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -4,3 +4,11 @@ config BE2NET  	---help---  	  This driver implements the NIC functionality for ServerEngines'  	  10Gbps network adapter - BladeEngine. + +config BE2NET_VXLAN +        bool "VXLAN offload support on be2net driver" +        default y +        depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m) +        ---help--- +	  Say Y here if you want to enable VXLAN offload support on +	  be2net driver. diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index ace5050dba3..c2f5d2d3b93 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   *   * This program is free software; you can redistribute it and/or @@ -34,7 +34,7 @@  #include "be_hw.h"  #include "be_roce.h" -#define DRV_VER			"4.9.134.0u" +#define DRV_VER			"10.2u"  #define DRV_NAME		"be2net"  #define BE_NAME			"Emulex BladeEngine2"  #define BE3_NAME		"Emulex BladeEngine3" @@ -42,7 +42,7 @@  #define OC_NAME_BE		OC_NAME	"(be3)"  #define OC_NAME_LANCER		OC_NAME "(Lancer)"  #define OC_NAME_SH		OC_NAME "(Skyhawk)" -#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver" +#define DRV_DESC		"Emulex OneConnect NIC Driver"  #define BE_VENDOR_ID 		0x19a2  #define EMULEX_VENDOR_ID	0x10df @@ -88,7 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)  #define BE_MIN_MTU		256  #define BE_NUM_VLANS_SUPPORTED	64 -#define BE_MAX_EQD		96u +#define BE_MAX_EQD		128u  #define	BE_MAX_TX_FRAG_COUNT	30  #define EVNT_Q_LEN		1024 @@ -103,6 +103,7 @@ static inline char *nic_name(struct pci_dev *pdev)  #define BE3_MAX_RSS_QS		16  #define BE3_MAX_TX_QS		16  #define BE3_MAX_EVT_QS		16 +#define BE3_SRIOV_MAX_EVT_QS	8  #define MAX_RX_QS		32  #define MAX_EVT_QS		32 @@ -119,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)  #define MAX_VFS			30 /* Max VFs supported by BE3 FW */  #define FW_VER_LEN		32 +#define	RSS_INDIR_TABLE_LEN	128 +#define RSS_HASH_KEY_LEN	40 +  struct be_dma_mem {  	void *va;  	dma_addr_t dma; @@ -198,8 +202,37 @@ struct be_eq_obj {  	u16 spurious_intr;  	struct napi_struct napi;  	struct be_adapter *adapter; + +#ifdef CONFIG_NET_RX_BUSY_POLL +#define BE_EQ_IDLE		0 +#define BE_EQ_NAPI		1	/* napi owns this EQ */ +#define BE_EQ_POLL		2	/* poll owns this EQ */ +#define BE_EQ_LOCKED		(BE_EQ_NAPI | BE_EQ_POLL) +#define BE_EQ_NAPI_YIELD	4	/* napi yielded this EQ */ +#define BE_EQ_POLL_YIELD	8	/* poll yielded this EQ */ +#define BE_EQ_YIELD		(BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD) +#define BE_EQ_USER_PEND		(BE_EQ_POLL | BE_EQ_POLL_YIELD) +	unsigned int state; +	spinlock_t lock;	/* lock to serialize napi and busy-poll */ +#endif  /* CONFIG_NET_RX_BUSY_POLL */  } ____cacheline_aligned_in_smp; +struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */ +	bool enable; +	u32 min_eqd;		/* in usecs */ +	u32 max_eqd;		/* in usecs */ +	u32 prev_eqd;		/* in usecs */ +	u32 et_eqd;		/* configured val when aic is off */ +	ulong jiffies; +	u64 rx_pkts_prev;	/* Used to calculate RX pps */ +	u64 tx_reqs_prev;	/* Used to calculate TX pps */ +}; + +enum { +	NAPI_POLLING, +	BUSY_POLLING +}; +  struct be_mcc_obj {  	struct be_queue_info q;  	struct be_queue_info cq; @@ -214,6 +247,7 @@ struct be_tx_stats {  	
u64 tx_compl;  	ulong tx_jiffies;  	u32 tx_stops; +	u32 tx_drv_drops;	/* pkts dropped by driver */  	struct u64_stats_sync sync;  	struct u64_stats_sync sync_compl;  }; @@ -230,23 +264,21 @@ struct be_tx_obj {  /* Struct to remember the pages posted for rx frags */  struct be_rx_page_info {  	struct page *page; +	/* set to page-addr for last frag of the page & frag-addr otherwise */  	DEFINE_DMA_UNMAP_ADDR(bus);  	u16 page_offset; -	bool last_page_user; +	bool last_frag;		/* last frag of the page */  };  struct be_rx_stats {  	u64 rx_bytes;  	u64 rx_pkts; -	u64 rx_pkts_prev; -	ulong rx_jiffies;  	u32 rx_drops_no_skbs;	/* skb allocation errors */  	u32 rx_drops_no_frags;	/* HW has no fetched frags */  	u32 rx_post_fail;	/* page post alloc failures */  	u32 rx_compl;  	u32 rx_mcast_pkts;  	u32 rx_compl_err;	/* completions with err set */ -	u32 rx_pps;		/* pkts per second */  	struct u64_stats_sync sync;  }; @@ -254,7 +286,6 @@ struct be_rx_compl_info {  	u32 rss_hash;  	u16 vlan_tag;  	u16 pkt_size; -	u16 rxq_idx;  	u16 port;  	u8 vlanf;  	u8 num_rcvd; @@ -265,9 +296,10 @@ struct be_rx_compl_info {  	u8 ip_csum;  	u8 l4_csum;  	u8 ipv6; -	u8 vtm; +	u8 qnq;  	u8 pkt_type;  	u8 ip_frag; +	u8 tunneled;  };  struct be_rx_obj { @@ -315,15 +347,23 @@ struct be_drv_stats {  	u32 rx_input_fifo_overflow_drop;  	u32 pmem_fifo_overflow_drop;  	u32 jabber_events; +	u32 rx_roce_bytes_lsd; +	u32 rx_roce_bytes_msd; +	u32 rx_roce_frames; +	u32 roce_drops_payload_len; +	u32 roce_drops_crc;  }; +/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */ +#define BE_RESET_VLAN_TAG_ID	0xFFFF +  struct be_vf_cfg {  	unsigned char mac_addr[ETH_ALEN];  	int if_handle;  	int pmac_id; -	u16 def_vid;  	u16 vlan_tag;  	u32 tx_rate; +	u32 plink_tracking;  };  enum vf_state { @@ -333,11 +373,15 @@ enum vf_state {  #define BE_FLAGS_LINK_STATUS_INIT		1  #define BE_FLAGS_WORKER_SCHEDULED		(1 << 3) +#define BE_FLAGS_VLAN_PROMISC			(1 << 4) +#define BE_FLAGS_MCAST_PROMISC			(1 << 5)  #define BE_FLAGS_NAPI_ENABLED			(1 << 9) -#define BE_UC_PMAC_COUNT		30 -#define BE_VF_UC_PMAC_COUNT		2  #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11) +#define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12) +#define BE_FLAGS_SETUP_DONE			(1 << 13) +#define BE_UC_PMAC_COUNT			30 +#define BE_VF_UC_PMAC_COUNT			2  /* Ethtool set_dump flags */  #define LANCER_INITIATE_FW_DUMP			0x1 @@ -369,6 +413,13 @@ struct be_resources {  	u32 if_cap_flags;  }; +struct rss_info { +	u64 rss_flags; +	u8 rsstable[RSS_INDIR_TABLE_LEN]; +	u8 rss_queue[RSS_INDIR_TABLE_LEN]; +	u8 rss_hkey[RSS_HASH_KEY_LEN]; +}; +  struct be_adapter {  	struct pci_dev *pdev;  	struct net_device *netdev; @@ -403,8 +454,9 @@ struct be_adapter {  	u32 big_page_size;	/* Compounded page size shared by rx wrbs */  	struct be_drv_stats drv_stats; +	struct be_aic_obj aic_obj[MAX_EVT_QS];  	u16 vlans_added; -	u8 vlan_tag[VLAN_N_VID]; +	unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];  	u8 vlan_prio_bmap;	/* Available Priority BitMap */  	u16 recommended_prio;	/* Recommended Priority */  	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */ @@ -430,12 +482,12 @@ struct be_adapter {  	u32 port_num;  	bool promiscuous; +	u8 mc_type;  	u32 function_mode;  	u32 function_caps;  	u32 rx_fc;		/* Rx flow control */  	u32 tx_fc;		/* Tx flow control */  	bool stats_cmd_sent; -	u32 if_type;  	struct {  		u32 size;  		u32 total_size; @@ -446,7 +498,7 @@ struct be_adapter {  	struct list_head entry;  	u32 flash_status; -	struct completion flash_compl; +	struct completion et_cmd_compl;  	struct 
be_resources res;	/* resources available for the func */  	u16 num_vfs;			/* Number of VFs provisioned by PF */ @@ -456,22 +508,24 @@ struct be_adapter {  	u32 sli_family;  	u8 hba_port_num;  	u16 pvid; +	__be16 vxlan_port;  	struct phy_info phy;  	u8 wol_cap; -	bool wol; +	bool wol_en;  	u32 uc_macs;		/* Count of secondary UC MAC programmed */  	u16 asic_rev;  	u16 qnq_vid;  	u32 msg_enable;  	int be_get_temp_freq;  	u8 pf_number; -	u64 rss_flags; +	struct rss_info rss_info;  };  #define be_physfn(adapter)		(!adapter->virtfn) +#define be_virtfn(adapter)		(adapter->virtfn)  #define	sriov_enabled(adapter)		(adapter->num_vfs > 0) -#define sriov_want(adapter)             (be_max_vfs(adapter) && num_vfs && \ -					 be_physfn(adapter)) +#define sriov_want(adapter)             (be_physfn(adapter) &&	\ +					 (num_vfs || pci_num_vf(adapter->pdev)))  #define for_all_vfs(adapter, vf_cfg, i)					\  	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\  		i++, vf_cfg++) @@ -499,6 +553,12 @@ static inline u16 be_max_qs(struct be_adapter *adapter)  	return min_t(u16, num, num_online_cpus());  } +/* Is BE in pvid_tagging mode */ +#define be_pvid_tagging_enabled(adapter)	(adapter->pvid) + +/* Is BE in QNQ multi-channel mode */ +#define be_is_qnq_mode(adapter)		(adapter->function_mode & QNQ_MODE) +  #define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \  				 adapter->pdev->device == OC_DEVICE_ID4) @@ -544,6 +604,10 @@ extern const struct ethtool_ops be_ethtool_ops;  	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \  		i++, eqo++) +#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i)			\ +	for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\ +		 i += adapter->num_evt_qs, rxo += adapter->num_evt_qs) +  #define is_mcc_eqo(eqo)			(eqo->idx == 0)  #define mcc_eqo(adapter)		(&adapter->eq_obj[0]) @@ -694,27 +758,137 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)  	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;  } -extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, -		u16 num_popped); -extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); -extern void be_parse_stats(struct be_adapter *adapter); -extern int be_load_fw(struct be_adapter *adapter, u8 *func); -extern bool be_is_wol_supported(struct be_adapter *adapter); -extern bool be_pause_supported(struct be_adapter *adapter); -extern u32 be_get_fw_log_level(struct be_adapter *adapter); +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline bool be_lock_napi(struct be_eq_obj *eqo) +{ +	bool status = true; + +	spin_lock(&eqo->lock); /* BH is already disabled */ +	if (eqo->state & BE_EQ_LOCKED) { +		WARN_ON(eqo->state & BE_EQ_NAPI); +		eqo->state |= BE_EQ_NAPI_YIELD; +		status = false; +	} else { +		eqo->state = BE_EQ_NAPI; +	} +	spin_unlock(&eqo->lock); +	return status; +} + +static inline void be_unlock_napi(struct be_eq_obj *eqo) +{ +	spin_lock(&eqo->lock); /* BH is already disabled */ + +	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD)); +	eqo->state = BE_EQ_IDLE; + +	spin_unlock(&eqo->lock); +} + +static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) +{ +	bool status = true; + +	spin_lock_bh(&eqo->lock); +	if (eqo->state & BE_EQ_LOCKED) { +		eqo->state |= BE_EQ_POLL_YIELD; +		status = false; +	} else { +		eqo->state |= BE_EQ_POLL; +	} +	spin_unlock_bh(&eqo->lock); +	return status; +} + +static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) +{ +	spin_lock_bh(&eqo->lock); + +	WARN_ON(eqo->state & (BE_EQ_NAPI)); +	
eqo->state = BE_EQ_IDLE; + +	spin_unlock_bh(&eqo->lock); +} + +static inline void be_enable_busy_poll(struct be_eq_obj *eqo) +{ +	spin_lock_init(&eqo->lock); +	eqo->state = BE_EQ_IDLE; +} + +static inline void be_disable_busy_poll(struct be_eq_obj *eqo) +{ +	local_bh_disable(); + +	/* It's enough to just acquire napi lock on the eqo to stop +	 * be_busy_poll() from processing any queueus. +	 */ +	while (!be_lock_napi(eqo)) +		mdelay(1); + +	local_bh_enable(); +} + +#else /* CONFIG_NET_RX_BUSY_POLL */ + +static inline bool be_lock_napi(struct be_eq_obj *eqo) +{ +	return true; +} + +static inline void be_unlock_napi(struct be_eq_obj *eqo) +{ +} + +static inline bool be_lock_busy_poll(struct be_eq_obj *eqo) +{ +	return false; +} + +static inline void be_unlock_busy_poll(struct be_eq_obj *eqo) +{ +} + +static inline void be_enable_busy_poll(struct be_eq_obj *eqo) +{ +} + +static inline void be_disable_busy_poll(struct be_eq_obj *eqo) +{ +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, +		  u16 num_popped); +void be_link_status_update(struct be_adapter *adapter, u8 link_status); +void be_parse_stats(struct be_adapter *adapter); +int be_load_fw(struct be_adapter *adapter, u8 *func); +bool be_is_wol_supported(struct be_adapter *adapter); +bool be_pause_supported(struct be_adapter *adapter); +u32 be_get_fw_log_level(struct be_adapter *adapter); + +static inline int fw_major_num(const char *fw_ver) +{ +	int fw_major = 0; + +	sscanf(fw_ver, "%d.", &fw_major); + +	return fw_major; +} +  int be_update_queues(struct be_adapter *adapter);  int be_poll(struct napi_struct *napi, int budget);  /*   * internal function to initialize-cleanup roce device.   */ -extern void be_roce_dev_add(struct be_adapter *); -extern void be_roce_dev_remove(struct be_adapter *); +void be_roce_dev_add(struct be_adapter *); +void be_roce_dev_remove(struct be_adapter *);  /*   * internal function to open-close roce device during ifup-ifdown.   */ -extern void be_roce_dev_open(struct be_adapter *); -extern void be_roce_dev_close(struct be_adapter *); +void be_roce_dev_open(struct be_adapter *); +void be_roce_dev_close(struct be_adapter *);  #endif				/* BE_H */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 1ab5dab11ef..f4ea3490f44 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   
*   * This program is free software; you can redistribute it and/or @@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {  	}  }; -static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, -			   u8 subsystem) +static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)  {  	int i;  	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); @@ -120,10 +119,76 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)  	return (void *)addr;  } +static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status) +{ +	if (base_status == MCC_STATUS_NOT_SUPPORTED || +	    base_status == MCC_STATUS_ILLEGAL_REQUEST || +	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES || +	    (opcode == OPCODE_COMMON_WRITE_FLASHROM && +	    (base_status == MCC_STATUS_ILLEGAL_FIELD || +	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH))) +		return true; +	else +		return false; +} + +/* Place holder for all the async MCC cmds wherein the caller is not in a busy + * loop (has not issued be_mcc_notify_wait()) + */ +static void be_async_cmd_process(struct be_adapter *adapter, +				 struct be_mcc_compl *compl, +				 struct be_cmd_resp_hdr *resp_hdr) +{ +	enum mcc_base_status base_status = base_status(compl->status); +	u8 opcode = 0, subsystem = 0; + +	if (resp_hdr) { +		opcode = resp_hdr->opcode; +		subsystem = resp_hdr->subsystem; +	} + +	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && +	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) { +		complete(&adapter->et_cmd_compl); +		return; +	} + +	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || +	     opcode == OPCODE_COMMON_WRITE_OBJECT) && +	    subsystem == CMD_SUBSYSTEM_COMMON) { +		adapter->flash_status = compl->status; +		complete(&adapter->et_cmd_compl); +		return; +	} + +	if ((opcode == OPCODE_ETH_GET_STATISTICS || +	     opcode == OPCODE_ETH_GET_PPORT_STATS) && +	    subsystem == CMD_SUBSYSTEM_ETH && +	    base_status == MCC_STATUS_SUCCESS) { +		be_parse_stats(adapter); +		adapter->stats_cmd_sent = false; +		return; +	} + +	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && +	    subsystem == CMD_SUBSYSTEM_COMMON) { +		if (base_status == MCC_STATUS_SUCCESS) { +			struct be_cmd_resp_get_cntl_addnl_attribs *resp = +							(void *)resp_hdr; +			adapter->drv_stats.be_on_die_temperature = +						resp->on_die_temperature; +		} else { +			adapter->be_get_temp_freq = 0; +		} +		return; +	} +} +  static int be_mcc_compl_process(struct be_adapter *adapter,  				struct be_mcc_compl *compl)  { -	u16 compl_status, extd_status; +	enum mcc_base_status base_status; +	enum mcc_addl_status addl_status;  	struct be_cmd_resp_hdr *resp_hdr;  	u8 opcode = 0, subsystem = 0; @@ -131,70 +196,49 @@ static int be_mcc_compl_process(struct be_adapter *adapter,  	 * from mcc_wrb */  	be_dws_le_to_cpu(compl, 4); -	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & -				CQE_STATUS_COMPL_MASK; +	base_status = base_status(compl->status); +	addl_status = addl_status(compl->status);  	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); -  	if (resp_hdr) {  		opcode = resp_hdr->opcode;  		subsystem = resp_hdr->subsystem;  	} -	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || -	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) && -	    (subsystem == CMD_SUBSYSTEM_COMMON)) { -		adapter->flash_status = compl_status; -		complete(&adapter->flash_compl); -	} +	be_async_cmd_process(adapter, compl, resp_hdr); -	if (compl_status == MCC_STATUS_SUCCESS) { -		if (((opcode == OPCODE_ETH_GET_STATISTICS) || -		     
(opcode == OPCODE_ETH_GET_PPORT_STATS)) && -		    (subsystem == CMD_SUBSYSTEM_ETH)) { -			be_parse_stats(adapter); -			adapter->stats_cmd_sent = false; -		} -		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && -		    subsystem == CMD_SUBSYSTEM_COMMON) { -			struct be_cmd_resp_get_cntl_addnl_attribs *resp = -				(void *)resp_hdr; -			adapter->drv_stats.be_on_die_temperature = -				resp->on_die_temperature; -		} -	} else { -		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) -			adapter->be_get_temp_freq = 0; - -		if (compl_status == MCC_STATUS_NOT_SUPPORTED || -			compl_status == MCC_STATUS_ILLEGAL_REQUEST) -			goto done; +	if (base_status != MCC_STATUS_SUCCESS && +	    !be_skip_err_log(opcode, base_status, addl_status)) { -		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { +		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {  			dev_warn(&adapter->pdev->dev,  				 "VF is not privileged to issue opcode %d-%d\n",  				 opcode, subsystem);  		} else { -			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & -					CQE_STATUS_EXTD_MASK;  			dev_err(&adapter->pdev->dev,  				"opcode %d-%d failed:status %d-%d\n", -				opcode, subsystem, compl_status, extd_status); +				opcode, subsystem, base_status, addl_status);  		}  	} -done: -	return compl_status; +	return compl->status;  }  /* Link state evt is a string of bytes; no need for endian swapping */  static void be_async_link_state_process(struct be_adapter *adapter, -		struct be_async_event_link_state *evt) +					struct be_mcc_compl *compl)  { +	struct be_async_event_link_state *evt = +			(struct be_async_event_link_state *)compl; +  	/* When link status changes, link speed must be re-queried from FW */  	adapter->phy.link_speed = -1; -	/* Ignore physical link event */ -	if (lancer_chip(adapter) && +	/* On BEx the FW does not send a separate link status +	 * notification for physical and logical link. +	 * On other chips just process the logical link +	 * status notification +	 */ +	if (!BEx_chip(adapter) &&  	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))  		return; @@ -202,13 +246,17 @@ static void be_async_link_state_process(struct be_adapter *adapter,  	 * it may not be received in some cases.  	 
*/  	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) -		be_link_status_update(adapter, evt->port_link_status); +		be_link_status_update(adapter, +				      evt->port_link_status & LINK_STATUS_MASK);  }  /* Grp5 CoS Priority evt */  static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, -		struct be_async_event_grp5_cos_priority *evt) +					       struct be_mcc_compl *compl)  { +	struct be_async_event_grp5_cos_priority *evt = +			(struct be_async_event_grp5_cos_priority *)compl; +  	if (evt->valid) {  		adapter->vlan_prio_bmap = evt->available_priority_bmap;  		adapter->recommended_prio &= ~VLAN_PRIO_MASK; @@ -219,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,  /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */  static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, -		struct be_async_event_grp5_qos_link_speed *evt) +					    struct be_mcc_compl *compl)  { +	struct be_async_event_grp5_qos_link_speed *evt = +			(struct be_async_event_grp5_qos_link_speed *)compl; +  	if (adapter->phy.link_speed >= 0 &&  	    evt->physical_port == adapter->port_num)  		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; @@ -228,35 +279,35 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,  /*Grp5 PVID evt*/  static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, -		struct be_async_event_grp5_pvid_state *evt) +					     struct be_mcc_compl *compl)  { -	if (evt->enabled) +	struct be_async_event_grp5_pvid_state *evt = +			(struct be_async_event_grp5_pvid_state *)compl; + +	if (evt->enabled) {  		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; -	else +		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); +	} else {  		adapter->pvid = 0; +	}  }  static void be_async_grp5_evt_process(struct be_adapter *adapter, -		u32 trailer, struct be_mcc_compl *evt) +				      struct be_mcc_compl *compl)  { -	u8 event_type = 0; - -	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & -		ASYNC_TRAILER_EVENT_TYPE_MASK; +	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) & +				ASYNC_EVENT_TYPE_MASK;  	switch (event_type) {  	case ASYNC_EVENT_COS_PRIORITY: -		be_async_grp5_cos_priority_process(adapter, -		(struct be_async_event_grp5_cos_priority *)evt); -	break; +		be_async_grp5_cos_priority_process(adapter, compl); +		break;  	case ASYNC_EVENT_QOS_SPEED: -		be_async_grp5_qos_speed_process(adapter, -		(struct be_async_event_grp5_qos_link_speed *)evt); -	break; +		be_async_grp5_qos_speed_process(adapter, compl); +		break;  	case ASYNC_EVENT_PVID_STATE: -		be_async_grp5_pvid_state_process(adapter, -		(struct be_async_event_grp5_pvid_state *)evt); -	break; +		be_async_grp5_pvid_state_process(adapter, compl); +		break;  	default:  		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",  			 event_type); @@ -265,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,  }  static void be_async_dbg_evt_process(struct be_adapter *adapter, -		u32 trailer, struct be_mcc_compl *cmp) +				     struct be_mcc_compl *cmp)  {  	u8 event_type = 0;  	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; -	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & -		ASYNC_TRAILER_EVENT_TYPE_MASK; +	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & +			ASYNC_EVENT_TYPE_MASK;  	switch (event_type) {  	case ASYNC_DEBUG_EVENT_TYPE_QNQ: @@ -286,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,  	}  } -static 
inline bool is_link_state_evt(u32 trailer) +static inline bool is_link_state_evt(u32 flags)  { -	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & -		ASYNC_TRAILER_EVENT_CODE_MASK) == -				ASYNC_EVENT_CODE_LINK_STATE; +	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == +			ASYNC_EVENT_CODE_LINK_STATE;  } -static inline bool is_grp5_evt(u32 trailer) +static inline bool is_grp5_evt(u32 flags)  { -	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & -		ASYNC_TRAILER_EVENT_CODE_MASK) == -				ASYNC_EVENT_CODE_GRP_5); +	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == +			ASYNC_EVENT_CODE_GRP_5;  } -static inline bool is_dbg_evt(u32 trailer) +static inline bool is_dbg_evt(u32 flags)  { -	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & -		ASYNC_TRAILER_EVENT_CODE_MASK) == -				ASYNC_EVENT_CODE_QNQ); +	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == +			ASYNC_EVENT_CODE_QNQ; +} + +static void be_mcc_event_process(struct be_adapter *adapter, +				 struct be_mcc_compl *compl) +{ +	if (is_link_state_evt(compl->flags)) +		be_async_link_state_process(adapter, compl); +	else if (is_grp5_evt(compl->flags)) +		be_async_grp5_evt_process(adapter, compl); +	else if (is_dbg_evt(compl->flags)) +		be_async_dbg_evt_process(adapter, compl);  }  static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) @@ -346,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)  	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;  	spin_lock(&adapter->mcc_cq_lock); +  	while ((compl = be_mcc_compl_get(adapter))) {  		if (compl->flags & CQE_FLAGS_ASYNC_MASK) { -			/* Interpret flags as an async trailer */ -			if (is_link_state_evt(compl->flags)) -				be_async_link_state_process(adapter, -				(struct be_async_event_link_state *) compl); -			else if (is_grp5_evt(compl->flags)) -				be_async_grp5_evt_process(adapter, -				compl->flags, compl); -			else if (is_dbg_evt(compl->flags)) -				be_async_dbg_evt_process(adapter, -				compl->flags, compl); +			be_mcc_event_process(adapter, compl);  		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { -				status = be_mcc_compl_process(adapter, compl); -				atomic_dec(&mcc_obj->q.used); +			status = be_mcc_compl_process(adapter, compl); +			atomic_dec(&mcc_obj->q.used);  		}  		be_mcc_compl_use(compl);  		num++; @@ -420,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)  	if (status == -EIO)  		goto out; -	status = resp->status; +	status = (resp->base_status | +		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) << +		   CQE_ADDL_STATUS_SHIFT));  out:  	return status;  } @@ -519,7 +572,7 @@ static u16 be_POST_stage_get(struct be_adapter *adapter)  	return sem & POST_STAGE_MASK;  } -int lancer_wait_ready(struct be_adapter *adapter) +static int lancer_wait_ready(struct be_adapter *adapter)  {  #define SLIPORT_READY_TIMEOUT 30  	u32 sliport_status; @@ -544,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)  	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;  	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);  	if (sliport_status & SLIPORT_STATUS_ERR_MASK) { -		sliport_err1 = ioread32(adapter->db + -					SLIPORT_ERROR1_OFFSET); -		sliport_err2 = ioread32(adapter->db + -					SLIPORT_ERROR2_OFFSET); +		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); +		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);  		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&  		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2) @@ 
-614,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)  		if (stage == POST_STAGE_ARMFW_RDY)  			return 0; -		dev_info(dev, "Waiting for POST, %ds elapsed\n", -			 timeout); +		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);  		if (msleep_interruptible(2000)) {  			dev_err(dev, "Waiting for POST aborted\n");  			return -EINTR; @@ -633,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)  	return &wrb->payload.sgl[0];  } -static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, -				 unsigned long addr) +static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)  {  	wrb->tag0 = addr & 0xFFFFFFFF;  	wrb->tag1 = upper_32_bits(addr); @@ -643,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,  /* Don't touch the hdr after it's prepared */  /* mem will be NULL for embedded commands */  static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, -				u8 subsystem, u8 opcode, int cmd_len, -				struct be_mcc_wrb *wrb, struct be_dma_mem *mem) +				   u8 subsystem, u8 opcode, int cmd_len, +				   struct be_mcc_wrb *wrb, +				   struct be_dma_mem *mem)  {  	struct be_sge *sge; @@ -667,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,  }  static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, -			struct be_dma_mem *mem) +				      struct be_dma_mem *mem)  {  	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);  	u64 dma = (u64)mem->dma; @@ -852,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, +			       NULL);  	/* Support for EQ_CREATEv2 available only SH-R onwards */  	if (!(BEx_chip(adapter) || lancer_chip(adapter))) @@ -901,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, +			       NULL);  	req->type = MAC_ADDRESS_TYPE_NETWORK;  	if (permanent) {  		req->permanent = 1; @@ -924,7 +976,7 @@ err:  /* Uses synchronous MCCQ */  int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, -		u32 if_id, u32 *pmac_id, u32 domain) +		    u32 if_id, u32 *pmac_id, u32 domain)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_pmac_add *req; @@ -940,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, +			       NULL);  	req->hdr.domain = domain;  	req->if_id = cpu_to_le32(if_id); @@ -996,7 +1049,7 @@ err:  /* Uses Mbox */  int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, -		struct be_queue_info *eq, bool no_delay, int coalesce_wm) +		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_cq_create *req; @@ -1012,32 +1065,38 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,  	ctxt = &req->context;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_CQ_CREATE, 
sizeof(*req), wrb, +			       NULL);  	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));  	if (BEx_chip(adapter)) {  		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, -								coalesce_wm); +			      coalesce_wm);  		AMAP_SET_BITS(struct amap_cq_context_be, nodelay, -								ctxt, no_delay); +			      ctxt, no_delay);  		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, -						__ilog2_u32(cq->len/256)); +			      __ilog2_u32(cq->len / 256));  		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);  		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);  		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);  	} else {  		req->hdr.version = 2;  		req->page_size = 1; /* 1 for 4K */ + +		/* coalesce-wm field in this cmd is not relevant to Lancer. +		 * Lancer uses COMMON_MODIFY_CQ to set this field +		 */ +		if (!lancer_chip(adapter)) +			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, +				      ctxt, coalesce_wm);  		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, -								no_delay); +			      no_delay);  		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, -						__ilog2_u32(cq->len/256)); +			      __ilog2_u32(cq->len / 256));  		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); -		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, -								ctxt, 1); -		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, -								ctxt, eq->id); +		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); +		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);  	}  	be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -1065,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)  }  static int be_cmd_mccq_ext_create(struct be_adapter *adapter, -				struct be_queue_info *mccq, -				struct be_queue_info *cq) +				  struct be_queue_info *mccq, +				  struct be_queue_info *cq)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_mcc_ext_create *req; @@ -1082,26 +1141,26 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,  	ctxt = &req->context;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, +			       NULL);  	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); -	if (lancer_chip(adapter)) { -		req->hdr.version = 1; -		req->cq_id = cpu_to_le16(cq->id); - -		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, -						be_encoded_q_len(mccq->len)); -		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); -		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, -								ctxt, cq->id); -		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, -								 ctxt, 1); - -	} else { +	if (BEx_chip(adapter)) {  		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);  		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, -						be_encoded_q_len(mccq->len)); +			      be_encoded_q_len(mccq->len));  		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); +	} else { +		req->hdr.version = 1; +		req->cq_id = cpu_to_le16(cq->id); + +		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, +			      be_encoded_q_len(mccq->len)); +		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); +		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, +			      ctxt, cq->id); +		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, +			      ctxt, 1);  	}  	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ @@ -1123,8 
+1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,  }  static int be_cmd_mccq_org_create(struct be_adapter *adapter, -				struct be_queue_info *mccq, -				struct be_queue_info *cq) +				  struct be_queue_info *mccq, +				  struct be_queue_info *cq)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_mcc_create *req; @@ -1140,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,  	ctxt = &req->context;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, +			       NULL);  	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));  	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);  	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, -			be_encoded_q_len(mccq->len)); +		      be_encoded_q_len(mccq->len));  	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);  	be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -1165,13 +1225,12 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,  }  int be_cmd_mccq_create(struct be_adapter *adapter, -			struct be_queue_info *mccq, -			struct be_queue_info *cq) +		       struct be_queue_info *mccq, struct be_queue_info *cq)  {  	int status;  	status = be_cmd_mccq_ext_create(adapter, mccq, cq); -	if (status && !lancer_chip(adapter)) { +	if (status && BEx_chip(adapter)) {  		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "  			"or newer to avoid conflicting priorities between NIC "  			"and FCoE traffic"); @@ -1191,11 +1250,10 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)  	req = embedded_payload(&wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, -				OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); +			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);  	if (lancer_chip(adapter)) {  		req->hdr.version = 1; -		req->if_id = cpu_to_le16(adapter->if_handle);  	} else if (BEx_chip(adapter)) {  		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)  			req->hdr.version = 2; @@ -1203,6 +1261,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)  		req->hdr.version = 2;  	} +	if (req->hdr.version > 0) +		req->if_id = cpu_to_le16(adapter->if_handle);  	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);  	req->ulp_num = BE_ULP1_NUM;  	req->type = BE_ETH_TX_RING_TYPE_STANDARD; @@ -1227,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)  /* Uses MCC */  int be_cmd_rxq_create(struct be_adapter *adapter, -		struct be_queue_info *rxq, u16 cq_id, u16 frag_size, -		u32 if_id, u32 rss, u8 *rss_id) +		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size, +		      u32 if_id, u32 rss, u8 *rss_id)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_eth_rx_create *req; @@ -1245,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, -				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); +			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);  	req->cq_id = cpu_to_le16(cq_id);  	req->frag_size = fls(frag_size) - 1; @@ -1272,7 +1332,7 @@ err:   * Uses Mbox   */  int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, -		int queue_type) +		     int queue_type)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_q_destroy *req; @@ -1311,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,  	}  	
be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, -				NULL); +			       NULL);  	req->id = cpu_to_le16(q->id);  	status = be_mbox_notify_wait(adapter); @@ -1338,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, -			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); +			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);  	req->id = cpu_to_le16(q->id);  	status = be_mcc_notify_wait(adapter); @@ -1361,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,  	req = embedded_payload(&wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL); +			       OPCODE_COMMON_NTWK_INTERFACE_CREATE, +			       sizeof(*req), &wrb, NULL);  	req->hdr.domain = domain;  	req->capability_flags = cpu_to_le32(cap_flags);  	req->enable_flags = cpu_to_le32(en_flags); @@ -1399,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY, +			       sizeof(*req), wrb, NULL);  	req->hdr.domain = domain;  	req->interface_id = cpu_to_le32(interface_id); @@ -1429,11 +1491,16 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)  	hdr = nonemb_cmd->va;  	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, -		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); +			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, +			       nonemb_cmd);  	/* version 1 of the cmd is not supported only by BE2 */ -	if (!BE2_chip(adapter)) +	if (BE2_chip(adapter)) +		hdr->version = 0; +	if (BE3_chip(adapter) || lancer_chip(adapter))  		hdr->version = 1; +	else +		hdr->version = 2;  	be_mcc_notify(adapter);  	adapter->stats_cmd_sent = true; @@ -1445,7 +1512,7 @@ err:  /* Lancer Stats */  int lancer_cmd_get_pport_stats(struct be_adapter *adapter, -				struct be_dma_mem *nonemb_cmd) +			       struct be_dma_mem *nonemb_cmd)  {  	struct be_mcc_wrb *wrb; @@ -1466,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,  	req = nonemb_cmd->va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, -			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, -			nonemb_cmd); +			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, +			       wrb, nonemb_cmd);  	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);  	req->cmd_params.params.reset_stats = 0; @@ -1526,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, +			       sizeof(*req), wrb, NULL);  	/* version 1 of the cmd is not supported only by BE2 */  	if (!BE2_chip(adapter)) @@ -1571,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), -		wrb, NULL); +			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, +			       sizeof(*req), wrb, NULL);  	be_mcc_notify(adapter); @@ -1598,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, 
u32 *log_size)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, +			       NULL);  	req->fat_operation = cpu_to_le32(QUERY_FAT);  	status = be_mcc_notify_wait(adapter);  	if (!status) { @@ -1628,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)  	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;  	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, -			get_fat_cmd.size, -			&get_fat_cmd.dma); +					      get_fat_cmd.size, +					      &get_fat_cmd.dma);  	if (!get_fat_cmd.va) {  		status = -ENOMEM;  		dev_err(&adapter->pdev->dev, @@ -1652,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)  		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;  		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, -				&get_fat_cmd); +				       OPCODE_COMMON_MANAGE_FAT, payload_len, +				       wrb, &get_fat_cmd);  		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);  		req->read_log_offset = cpu_to_le32(log_offset); @@ -1664,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)  		if (!status) {  			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;  			memcpy(buf + offset, -				resp->data_buffer, -				le32_to_cpu(resp->read_log_length)); +			       resp->data_buffer, +			       le32_to_cpu(resp->read_log_length));  		} else {  			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");  			goto err; @@ -1675,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)  	}  err:  	pci_free_consistent(adapter->pdev, get_fat_cmd.size, -			get_fat_cmd.va, -			get_fat_cmd.dma); +			    get_fat_cmd.va, get_fat_cmd.dma);  	spin_unlock_bh(&adapter->mcc_lock);  }  /* Uses synchronous mcc */  int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, -			char *fw_on_flash) +		      char *fw_on_flash)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_get_fw_version *req; @@ -1699,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, +			       NULL);  	status = be_mcc_notify_wait(adapter);  	if (!status) {  		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); @@ -1715,11 +1784,12 @@ err:  /* set the EQ delay interval of an EQ to specified value   * Uses async mcc   */ -int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, +		      int num)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_modify_eq_delay *req; -	int status = 0; +	int status = 0, i;  	spin_lock_bh(&adapter->mcc_lock); @@ -1731,15 +1801,18 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, +			       NULL); -	req->num_eq = cpu_to_le32(1); -	req->delay[0].eq_id = cpu_to_le32(eq_id); -	req->delay[0].phase = 0; -	req->delay[0].delay_multiplier = cpu_to_le32(eqd); +	req->num_eq = cpu_to_le32(num); +	for (i = 0; i < num; i++) { +		
req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); +		req->set_eqd[i].phase = 0; +		req->set_eqd[i].delay_multiplier = +				cpu_to_le32(set_eqd[i].delay_multiplier); +	}  	be_mcc_notify(adapter); -  err:  	spin_unlock_bh(&adapter->mcc_lock);  	return status; @@ -1747,7 +1820,7 @@ err:  /* Uses sycnhronous mcc */  int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, -			u32 num, bool untagged, bool promiscuous) +		       u32 num)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_vlan_config *req; @@ -1763,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), +			       wrb, NULL);  	req->interface_id = if_id; -	req->promiscuous = promiscuous; -	req->untagged = untagged; +	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;  	req->num_vlan = num; -	if (!promiscuous) { -		memcpy(req->normal_vlan, vtag_array, -			req->num_vlan * sizeof(vtag_array[0])); -	} +	memcpy(req->normal_vlan, vtag_array, +	       req->num_vlan * sizeof(vtag_array[0]));  	status = be_mcc_notify_wait(adapter); -  err:  	spin_unlock_bh(&adapter->mcc_lock);  	return status; @@ -1797,21 +1867,28 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)  	}  	memset(req, 0, sizeof(*req));  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), -				wrb, mem); +			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), +			       wrb, mem);  	req->if_id = cpu_to_le32(adapter->if_handle);  	if (flags & IFF_PROMISC) {  		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | -					BE_IF_FLAGS_VLAN_PROMISCUOUS | -					BE_IF_FLAGS_MCAST_PROMISCUOUS); +						 BE_IF_FLAGS_VLAN_PROMISCUOUS | +						 BE_IF_FLAGS_MCAST_PROMISCUOUS);  		if (value == ON) -			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | -						BE_IF_FLAGS_VLAN_PROMISCUOUS | -						BE_IF_FLAGS_MCAST_PROMISCUOUS); +			req->if_flags = +				cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | +					    BE_IF_FLAGS_VLAN_PROMISCUOUS | +					    BE_IF_FLAGS_MCAST_PROMISCUOUS);  	} else if (flags & IFF_ALLMULTI) {  		req->if_flags_mask = req->if_flags =  				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); +	} else if (flags & BE_FLAGS_VLAN_PROMISC) { +		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); + +		if (value == ON) +			req->if_flags = +				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);  	} else {  		struct netdev_hw_addr *ha;  		int i = 0; @@ -1830,7 +1907,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)  			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);  	} +	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) != +	    req->if_flags_mask) { +		dev_warn(&adapter->pdev->dev, +			 "Cannot set rx filter flags 0x%x\n", +			 req->if_flags_mask); +		dev_warn(&adapter->pdev->dev, +			 "Interface is capable of 0x%x flags only\n", +			 be_if_cap_flags(adapter)); +	} +	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter)); +  	status = be_mcc_notify_wait(adapter); +  err:  	spin_unlock_bh(&adapter->mcc_lock);  	return status; @@ -1857,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); 
+			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), +			       wrb, NULL);  	req->tx_flow_control = cpu_to_le16((u16)tx_fc);  	req->rx_flow_control = cpu_to_le16((u16)rx_fc); @@ -1890,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), +			       wrb, NULL);  	status = be_mcc_notify_wait(adapter);  	if (!status) { @@ -1920,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, +			       sizeof(*req), wrb, NULL);  	status = be_mbox_notify_wait(adapter);  	if (!status) { @@ -1963,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, +			       NULL);  	status = be_mbox_notify_wait(adapter); @@ -1972,44 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)  }  int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, -			u32 rss_hash_opts, u16 table_size) +		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_rss_config *req; -	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, -			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, -			0x3ea83c02, 0x4a110304};  	int status; -	if (mutex_lock_interruptible(&adapter->mbox_lock)) -		return -1; +	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) +		return 0; -	wrb = wrb_from_mbox(adapter); +	spin_lock_bh(&adapter->mcc_lock); + +	wrb = wrb_from_mccq(adapter); +	if (!wrb) { +		status = -EBUSY; +		goto err; +	}  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, -		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); +			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);  	req->if_id = cpu_to_le32(adapter->if_handle);  	req->enable_rss = cpu_to_le16(rss_hash_opts);  	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); -	if (lancer_chip(adapter) || skyhawk_chip(adapter)) +	if (!BEx_chip(adapter))  		req->hdr.version = 1;  	memcpy(req->cpu_table, rsstable, table_size); -	memcpy(req->hash, myhash, sizeof(myhash)); +	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);  	be_dws_cpu_to_le(req->hash, sizeof(req->hash)); -	status = be_mbox_notify_wait(adapter); - -	mutex_unlock(&adapter->mbox_lock); +	status = be_mcc_notify_wait(adapter); +err: +	spin_unlock_bh(&adapter->mcc_lock);  	return status;  }  /* Uses sync mcc */  int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, -			u8 bcn, u8 sts, u8 state) +			    u8 bcn, u8 sts, u8 state)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_enable_disable_beacon *req; @@ -2025,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_ENABLE_DISABLE_BEACON, +			       sizeof(*req), wrb, NULL);  	req->port_num = port_num;  	req->beacon_state = state; @@ -2056,7 +2153,8 @@ int 
be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), +			       wrb, NULL);  	req->port_num = port_num; @@ -2095,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -				OPCODE_COMMON_WRITE_OBJECT, -				sizeof(struct lancer_cmd_req_write_object), wrb, -				NULL); +			       OPCODE_COMMON_WRITE_OBJECT, +			       sizeof(struct lancer_cmd_req_write_object), wrb, +			       NULL);  	ctxt = &req->context;  	AMAP_SET_BITS(struct amap_lancer_write_obj_context, -			write_length, ctxt, data_size); +		      write_length, ctxt, data_size);  	if (data_size == 0)  		AMAP_SET_BITS(struct amap_lancer_write_obj_context, -				eof, ctxt, 1); +			      eof, ctxt, 1);  	else  		AMAP_SET_BITS(struct amap_lancer_write_obj_context, -				eof, ctxt, 0); +			      eof, ctxt, 0);  	be_dws_cpu_to_le(ctxt, sizeof(req->context));  	req->write_offset = cpu_to_le32(data_offset); @@ -2116,15 +2214,15 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,  	req->descriptor_count = cpu_to_le32(1);  	req->buf_len = cpu_to_le32(data_size);  	req->addr_low = cpu_to_le32((cmd->dma + -				sizeof(struct lancer_cmd_req_write_object)) -				& 0xFFFFFFFF); +				     sizeof(struct lancer_cmd_req_write_object)) +				    & 0xFFFFFFFF);  	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +  				sizeof(struct lancer_cmd_req_write_object)));  	be_mcc_notify(adapter);  	spin_unlock_bh(&adapter->mcc_lock); -	if (!wait_for_completion_timeout(&adapter->flash_compl, +	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,  					 msecs_to_jiffies(60000)))  		status = -1;  	else @@ -2146,8 +2244,8 @@ err_unlock:  }  int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, -		u32 data_size, u32 data_offset, const char *obj_name, -		u32 *data_read, u32 *eof, u8 *addn_status) +			   u32 data_size, u32 data_offset, const char *obj_name, +			   u32 *data_read, u32 *eof, u8 *addn_status)  {  	struct be_mcc_wrb *wrb;  	struct lancer_cmd_req_read_object *req; @@ -2165,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_READ_OBJECT, -			sizeof(struct lancer_cmd_req_read_object), wrb, -			NULL); +			       OPCODE_COMMON_READ_OBJECT, +			       sizeof(struct lancer_cmd_req_read_object), wrb, +			       NULL);  	req->desired_read_len = cpu_to_le32(data_size);  	req->read_offset = cpu_to_le32(data_offset); @@ -2193,7 +2291,7 @@ err_unlock:  }  int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, -			u32 flash_type, u32 flash_opcode, u32 buf_size) +			  u32 flash_type, u32 flash_opcode, u32 buf_size)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_write_flashrom *req; @@ -2210,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,  	req = cmd->va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd); +			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, +			       cmd);  	req->params.op_type = cpu_to_le32(flash_type);  	req->params.op_code = cpu_to_le32(flash_opcode); @@ -2219,8 +2318,8 
@@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,  	be_mcc_notify(adapter);  	spin_unlock_bh(&adapter->mcc_lock); -	if (!wait_for_completion_timeout(&adapter->flash_compl, -			msecs_to_jiffies(40000))) +	if (!wait_for_completion_timeout(&adapter->et_cmd_compl, +					 msecs_to_jiffies(40000)))  		status = -1;  	else  		status = adapter->flash_status; @@ -2233,7 +2332,7 @@ err_unlock:  }  int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, -			 int offset) +			  u16 optype, int offset)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_read_flash_crc *req; @@ -2252,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,  			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),  			       wrb, NULL); -	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT); +	req->params.op_type = cpu_to_le32(optype);  	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);  	req->params.offset = cpu_to_le32(offset);  	req->params.data_buf_size = cpu_to_le32(0x4); @@ -2267,7 +2366,7 @@ err:  }  int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, -				struct be_dma_mem *nonemb_cmd) +			    struct be_dma_mem *nonemb_cmd)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_acpi_wol_magic_config *req; @@ -2283,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,  	req = nonemb_cmd->va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, -		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, -		nonemb_cmd); +			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), +			       wrb, nonemb_cmd);  	memcpy(req->magic_mac, mac, ETH_ALEN);  	status = be_mcc_notify_wait(adapter); @@ -2312,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, -			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, -			NULL); +			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), +			       wrb, NULL);  	req->src_port = port_num;  	req->dest_port = port_num; @@ -2327,10 +2426,12 @@ err:  }  int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, -		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) +			 u32 loopback_type, u32 pkt_size, u32 num_pkts, +			 u64 pattern)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_loopback_test *req; +	struct be_cmd_resp_loopback_test *resp;  	int status;  	spin_lock_bh(&adapter->mcc_lock); @@ -2344,9 +2445,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, -			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); -	req->hdr.timeout = cpu_to_le32(4); +			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, +			       NULL); +	req->hdr.timeout = cpu_to_le32(15);  	req->pattern = cpu_to_le64(pattern);  	req->src_port = cpu_to_le32(port_num);  	req->dest_port = cpu_to_le32(port_num); @@ -2354,19 +2456,22 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,  	req->num_pkts = cpu_to_le32(num_pkts);  	req->loopback_type = cpu_to_le32(loopback_type); -	status = be_mcc_notify_wait(adapter); -	if (!status) { -		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); -		status = le32_to_cpu(resp->status); -	} +	be_mcc_notify(adapter); +	spin_unlock_bh(&adapter->mcc_lock); + +	wait_for_completion(&adapter->et_cmd_compl); +	resp = embedded_payload(wrb); +	status = le32_to_cpu(resp->status); + +	return status;  err:  	
spin_unlock_bh(&adapter->mcc_lock);  	return status;  }  int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, -				u32 byte_cnt, struct be_dma_mem *cmd) +			u32 byte_cnt, struct be_dma_mem *cmd)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_ddrdma_test *req; @@ -2382,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,  	}  	req = cmd->va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, -			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd); +			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, +			       cmd);  	req->pattern = cpu_to_le64(pattern);  	req->byte_count = cpu_to_le32(byte_cnt); @@ -2410,7 +2516,7 @@ err:  }  int be_cmd_get_seeprom_data(struct be_adapter *adapter, -				struct be_dma_mem *nonemb_cmd) +			    struct be_dma_mem *nonemb_cmd)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_seeprom_read *req; @@ -2426,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,  	req = nonemb_cmd->va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, -			nonemb_cmd); +			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, +			       nonemb_cmd);  	status = be_mcc_notify_wait(adapter); @@ -2455,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)  		goto err;  	}  	cmd.size = sizeof(struct be_cmd_req_get_phy_info); -	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, -					&cmd.dma); +	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);  	if (!cmd.va) {  		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");  		status = -ENOMEM; @@ -2466,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)  	req = cmd.va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), -			wrb, &cmd); +			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), +			       wrb, &cmd);  	status = be_mcc_notify_wait(adapter);  	if (!status) { @@ -2489,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)  				BE_SUPPORTED_SPEED_1GBPS;  		}  	} -	pci_free_consistent(adapter->pdev, cmd.size, -				cmd.va, cmd.dma); +	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);  err:  	spin_unlock_bh(&adapter->mcc_lock);  	return status; @@ -2513,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);  	req->hdr.domain = domain;  	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); @@ -2542,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)  	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));  	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);  	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, -						&attribs_cmd.dma); +					      &attribs_cmd.dma);  	if (!attribs_cmd.va) { -		dev_err(&adapter->pdev->dev, -				"Memory allocation failure\n"); +		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");  		status = -ENOMEM;  		goto err;  	} @@ -2558,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)  	req = attribs_cmd.va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, -			&attribs_cmd); +			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, +			       wrb, &attribs_cmd);  	status = be_mbox_notify_wait(adapter);  	if 
(!status) { @@ -2594,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)  	req = embedded_payload(wrb);  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, +			       sizeof(*req), wrb, NULL);  	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |  				CAPABILITY_BE3_NATIVE_ERX_API); @@ -2643,6 +2747,13 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,  		struct be_cmd_resp_get_fn_privileges *resp =  						embedded_payload(wrb);  		*privilege = le32_to_cpu(resp->privilege_mask); + +		/* In UMC mode FW does not return right privileges. +		 * Override with correct privilege equivalent to PF. +		 */ +		if (BEx_chip(adapter) && be_is_mc(adapter) && +		    be_physfn(adapter)) +			*privilege = MAX_PRIVILEGES;  	}  err: @@ -2687,7 +2798,8 @@ err:   *		  If pmac_id is returned, pmac_id_valid is returned as true   */  int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, -			     bool *pmac_id_valid, u32 *pmac_id, u8 domain) +			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, +			     u8 domain)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_get_mac_list *req; @@ -2699,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,  	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));  	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);  	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, -			get_mac_list_cmd.size, -			&get_mac_list_cmd.dma); +						   get_mac_list_cmd.size, +						   &get_mac_list_cmd.dma);  	if (!get_mac_list_cmd.va) {  		dev_err(&adapter->pdev->dev, -				"Memory allocation failure during GET_MAC_LIST\n"); +			"Memory allocation failure during GET_MAC_LIST\n");  		return -ENOMEM;  	} @@ -2725,7 +2837,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,  	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;  	if (*pmac_id_valid) {  		req->mac_id = cpu_to_le32(*pmac_id); -		req->iface_id = cpu_to_le16(adapter->if_handle); +		req->iface_id = cpu_to_le16(if_handle);  		req->perm_override = 0;  	} else {  		req->perm_override = 1; @@ -2768,27 +2880,31 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,  		/* If no active mac_id found, return first mac addr */  		*pmac_id_valid = false;  		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, -								ETH_ALEN); +		       ETH_ALEN);  	}  out:  	spin_unlock_bh(&adapter->mcc_lock);  	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, -			get_mac_list_cmd.va, get_mac_list_cmd.dma); +			    get_mac_list_cmd.va, get_mac_list_cmd.dma);  	return status;  } -int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac) +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, +			  u8 *mac, u32 if_handle, bool active, u32 domain)  { -	bool active = true; +	if (!active) +		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, +					 if_handle, domain);  	if (BEx_chip(adapter))  		return be_cmd_mac_addr_query(adapter, mac, false, -					     adapter->if_handle, curr_pmac_id); +					     if_handle, curr_pmac_id);  	else  		/* Fetch the MAC address using pmac_id */  		return be_cmd_get_mac_from_list(adapter, mac, &active, -						&curr_pmac_id, 0); +						&curr_pmac_id, +						if_handle, domain);  }  int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) @@ -2807,7 +2923,7 @@ int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 
 						       adapter->if_handle, 0);  	} else {  		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, -						  NULL, 0); +						  NULL, adapter->if_handle, 0);  	}  	return status; @@ -2825,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,  	memset(&cmd, 0, sizeof(struct be_dma_mem));  	cmd.size = sizeof(struct be_cmd_req_set_mac_list);  	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, -			&cmd.dma, GFP_KERNEL); +				    &cmd.dma, GFP_KERNEL);  	if (!cmd.va)  		return -ENOMEM; @@ -2839,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,  	req = cmd.va;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), -				wrb, &cmd); +			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), +			       wrb, &cmd);  	req->hdr.domain = domain;  	req->mac_count = mac_count; @@ -2850,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,  	status = be_mcc_notify_wait(adapter);  err: -	dma_free_coherent(&adapter->pdev->dev, cmd.size, -				cmd.va, cmd.dma); +	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);  	spin_unlock_bh(&adapter->mcc_lock);  	return status;  } @@ -2868,7 +2983,8 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)  	int status;  	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, -					  &pmac_id, dom); +					  &pmac_id, if_id, dom); +  	if (!status && active_mac)  		be_cmd_pmac_del(adapter, if_id, pmac_id, dom); @@ -2895,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,  	ctxt = &req->context;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, +			       NULL);  	req->hdr.domain = domain;  	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); @@ -2941,14 +3058,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,  	ctxt = &req->context;  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); +			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, +			       NULL);  	req->hdr.domain = domain;  	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,  		      ctxt, intf_id);  	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); -	if (!BEx_chip(adapter)) { +	if (!BEx_chip(adapter) && mode) {  		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,  			      ctxt, adapter->hba_port_num);  		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); @@ -2959,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,  	if (!status) {  		struct be_cmd_resp_get_hsw_config *resp =  						embedded_payload(wrb); -		be_dws_le_to_cpu(&resp->context, -						sizeof(resp->context)); +		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));  		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, -							pvid, &resp->context); +				    pvid, &resp->context);  		if (pvid)  			*pvid = le16_to_cpu(vid);  		if (mode) @@ -2979,24 +3096,24 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_acpi_wol_magic_config_v1 *req; -	int status; -	int payload_len = sizeof(*req); +	int status = 0;  	struct be_dma_mem cmd;  	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,  			    CMD_SUBSYSTEM_ETH))  		return -EPERM; +	if 
(be_is_wol_excluded(adapter)) +		return status; +  	if (mutex_lock_interruptible(&adapter->mbox_lock))  		return -1;  	memset(&cmd, 0, sizeof(struct be_dma_mem));  	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); -	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, -					       &cmd.dma); +	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);  	if (!cmd.va) { -		dev_err(&adapter->pdev->dev, -				"Memory allocation failure\n"); +		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");  		status = -ENOMEM;  		goto err;  	} @@ -3011,7 +3128,7 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,  			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, -			       payload_len, wrb, &cmd); +			       sizeof(*req), wrb, &cmd);  	req->hdr.version = 1;  	req->query_options = BE_GET_WOL_CAP; @@ -3021,13 +3138,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)  		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;  		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; -		/* the command could succeed misleadingly on old f/w -		 * which is not aware of the V1 version. fake an error. */ -		if (resp->hdr.response_length < payload_len) { -			status = -1; -			goto err; -		}  		adapter->wol_cap = resp->wol_settings; +		if (adapter->wol_cap & BE_WOL_CAP) +			adapter->wol_en = true;  	}  err:  	mutex_unlock(&adapter->mbox_lock); @@ -3036,6 +3149,76 @@ err:  	return status;  } + +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) +{ +	struct be_dma_mem extfat_cmd; +	struct be_fat_conf_params *cfgs; +	int status; +	int i, j; + +	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); +	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); +	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, +					     &extfat_cmd.dma); +	if (!extfat_cmd.va) +		return -ENOMEM; + +	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); +	if (status) +		goto err; + +	cfgs = (struct be_fat_conf_params *) +			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); +	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { +		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); +		for (j = 0; j < num_modes; j++) { +			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) +				cfgs->module[i].trace_lvl[j].dbg_lvl = +							cpu_to_le32(level); +		} +	} + +	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); +err: +	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, +			    extfat_cmd.dma); +	return status; +} + +int be_cmd_get_fw_log_level(struct be_adapter *adapter) +{ +	struct be_dma_mem extfat_cmd; +	struct be_fat_conf_params *cfgs; +	int status, j; +	int level = 0; + +	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); +	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); +	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, +					     &extfat_cmd.dma); + +	if (!extfat_cmd.va) { +		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", +			__func__); +		goto err; +	} + +	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); +	if (!status) { +		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + +						sizeof(struct be_cmd_resp_hdr)); +		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { +			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) +				level = cfgs->module[0].trace_lvl[j].dbg_lvl; +		} +	} +	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, +			    extfat_cmd.dma); +err: 
+	return level; +} +  int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,  				   struct be_dma_mem *cmd)  { @@ -3167,6 +3350,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,  	return NULL;  } +static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) +{ +	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; +	int i; + +	for (i = 0; i < desc_count; i++) { +		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) +			return (struct be_port_res_desc *)hdr; + +		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; +		hdr = (void *)hdr + hdr->desc_len; +	} +	return NULL; +} +  static void be_copy_nic_desc(struct be_resources *res,  			     struct be_nic_res_desc *desc)  { @@ -3198,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)  	memset(&cmd, 0, sizeof(struct be_dma_mem));  	cmd.size = sizeof(struct be_cmd_resp_get_func_config); -	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, -				      &cmd.dma); +	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);  	if (!cmd.va) {  		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");  		status = -ENOMEM; @@ -3245,7 +3442,7 @@ err:  /* Uses mbox */  static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter, -					u8 domain, struct be_dma_mem *cmd) +					  u8 domain, struct be_dma_mem *cmd)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_get_profile_config *req; @@ -3273,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,  /* Uses sync mcc */  static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter, -					u8 domain, struct be_dma_mem *cmd) +					  u8 domain, struct be_dma_mem *cmd)  {  	struct be_mcc_wrb *wrb;  	struct be_cmd_req_get_profile_config *req; @@ -3310,6 +3507,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,  {  	struct be_cmd_resp_get_profile_config *resp;  	struct be_pcie_res_desc *pcie; +	struct be_port_res_desc *port;  	struct be_nic_res_desc *nic;  	struct be_queue_info *mccq = &adapter->mcc_obj.q;  	struct be_dma_mem cmd; @@ -3332,11 +3530,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,  	resp = cmd.va;  	desc_count = le32_to_cpu(resp->desc_count); -	pcie =  be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, -				 desc_count); +	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param, +				desc_count);  	if (pcie)  		res->max_vfs = le16_to_cpu(pcie->num_vfs); +	port = be_get_port_desc(resp->func_param, desc_count); +	if (port) +		adapter->mc_type = port->mc_type; +  	nic = be_get_nic_desc(resp->func_param, desc_count);  	if (nic)  		be_copy_nic_desc(res, nic); @@ -3347,14 +3549,11 @@ err:  	return status;  } -/* Currently only Lancer uses this command and it supports version 0 only - * Uses sync mcc - */ -int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, -			      u8 domain) +int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, +			      int size, u8 version, u8 domain)  { -	struct be_mcc_wrb *wrb;  	struct be_cmd_req_set_profile_config *req; +	struct be_mcc_wrb *wrb;  	int status;  	spin_lock_bh(&adapter->mcc_lock); @@ -3366,44 +3565,130 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,  	}  	req = embedded_payload(wrb); -  	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,  			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),  			       wrb, NULL); +	req->hdr.version = version;  	req->hdr.domain = domain;  	req->desc_count = cpu_to_le32(1); -	
req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; -	req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; -	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV); -	req->nic_desc.pf_num = adapter->pf_number; -	req->nic_desc.vf_num = domain; - -	/* Mark fields invalid */ -	req->nic_desc.unicast_mac_count = 0xFFFF; -	req->nic_desc.mcc_count = 0xFFFF; -	req->nic_desc.vlan_count = 0xFFFF; -	req->nic_desc.mcast_mac_count = 0xFFFF; -	req->nic_desc.txq_count = 0xFFFF; -	req->nic_desc.rq_count = 0xFFFF; -	req->nic_desc.rssq_count = 0xFFFF; -	req->nic_desc.lro_count = 0xFFFF; -	req->nic_desc.cq_count = 0xFFFF; -	req->nic_desc.toe_conn_count = 0xFFFF; -	req->nic_desc.eq_count = 0xFFFF; -	req->nic_desc.link_param = 0xFF; -	req->nic_desc.bw_min = 0xFFFFFFFF; -	req->nic_desc.acpi_params = 0xFF; -	req->nic_desc.wol_param = 0x0F; - -	/* Change BW */ -	req->nic_desc.bw_min = cpu_to_le32(bps); -	req->nic_desc.bw_max = cpu_to_le32(bps); +	memcpy(req->desc, desc, size); +  	status = be_mcc_notify_wait(adapter);  err:  	spin_unlock_bh(&adapter->mcc_lock);  	return status;  } +/* Mark all fields invalid */ +void be_reset_nic_desc(struct be_nic_res_desc *nic) +{ +	memset(nic, 0, sizeof(*nic)); +	nic->unicast_mac_count = 0xFFFF; +	nic->mcc_count = 0xFFFF; +	nic->vlan_count = 0xFFFF; +	nic->mcast_mac_count = 0xFFFF; +	nic->txq_count = 0xFFFF; +	nic->rq_count = 0xFFFF; +	nic->rssq_count = 0xFFFF; +	nic->lro_count = 0xFFFF; +	nic->cq_count = 0xFFFF; +	nic->toe_conn_count = 0xFFFF; +	nic->eq_count = 0xFFFF; +	nic->iface_count = 0xFFFF; +	nic->link_param = 0xFF; +	nic->channel_id_param = cpu_to_le16(0xF000); +	nic->acpi_params = 0xFF; +	nic->wol_param = 0x0F; +	nic->tunnel_iface_count = 0xFFFF; +	nic->direct_tenant_iface_count = 0xFFFF; +	nic->bw_max = 0xFFFFFFFF; +} + +int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, +		      u8 domain) +{ +	struct be_nic_res_desc nic_desc; +	u32 bw_percent; +	u16 version = 0; + +	if (BE3_chip(adapter)) +		return be_cmd_set_qos(adapter, max_rate / 10, domain); + +	be_reset_nic_desc(&nic_desc); +	nic_desc.pf_num = adapter->pf_number; +	nic_desc.vf_num = domain; +	if (lancer_chip(adapter)) { +		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; +		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; +		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | +					(1 << NOSV_SHIFT); +		nic_desc.bw_max = cpu_to_le32(max_rate / 10); +	} else { +		version = 1; +		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; +		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; +		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); +		bw_percent = max_rate ? 
(max_rate * 100) / link_speed : 100; +		nic_desc.bw_max = cpu_to_le32(bw_percent); +	} + +	return be_cmd_set_profile_config(adapter, &nic_desc, +					 nic_desc.hdr.desc_len, +					 version, domain); +} + +int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) +{ +	struct be_mcc_wrb *wrb; +	struct be_cmd_req_manage_iface_filters *req; +	int status; + +	if (iface == 0xFFFFFFFF) +		return -1; + +	spin_lock_bh(&adapter->mcc_lock); + +	wrb = wrb_from_mccq(adapter); +	if (!wrb) { +		status = -EBUSY; +		goto err; +	} +	req = embedded_payload(wrb); + +	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), +			       wrb, NULL); +	req->op = op; +	req->target_iface_id = cpu_to_le32(iface); + +	status = be_mcc_notify_wait(adapter); +err: +	spin_unlock_bh(&adapter->mcc_lock); +	return status; +} + +int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) +{ +	struct be_port_res_desc port_desc; + +	memset(&port_desc, 0, sizeof(port_desc)); +	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; +	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; +	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); +	port_desc.link_num = adapter->hba_port_num; +	if (port) { +		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | +					(1 << RCVID_SHIFT); +		port_desc.nv_port = swab16(port); +	} else { +		port_desc.nv_flags = NV_TYPE_DISABLED; +		port_desc.nv_port = 0; +	} + +	return be_cmd_set_profile_config(adapter, &port_desc, +					 RESOURCE_DESC_SIZE_V1, 1, 0); +} +  int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,  		     int vf_num)  { @@ -3510,7 +3795,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)  	struct be_cmd_enable_disable_vf *req;  	int status; -	if (!lancer_chip(adapter)) +	if (BEx_chip(adapter))  		return 0;  	spin_lock_bh(&adapter->mcc_lock); @@ -3560,8 +3845,81 @@ int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)  	return status;  } +/* Uses MBOX */ +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) +{ +	struct be_cmd_req_get_active_profile *req; +	struct be_mcc_wrb *wrb; +	int status; + +	if (mutex_lock_interruptible(&adapter->mbox_lock)) +		return -1; + +	wrb = wrb_from_mbox(adapter); +	if (!wrb) { +		status = -EBUSY; +		goto err; +	} + +	req = embedded_payload(wrb); + +	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), +			       wrb, NULL); + +	status = be_mbox_notify_wait(adapter); +	if (!status) { +		struct be_cmd_resp_get_active_profile *resp = +							embedded_payload(wrb); +		*profile_id = le16_to_cpu(resp->active_profile_id); +	} + +err: +	mutex_unlock(&adapter->mbox_lock); +	return status; +} + +int be_cmd_set_logical_link_config(struct be_adapter *adapter, +				   int link_state, u8 domain) +{ +	struct be_mcc_wrb *wrb; +	struct be_cmd_req_set_ll_link *req; +	int status; + +	if (BEx_chip(adapter) || lancer_chip(adapter)) +		return 0; + +	spin_lock_bh(&adapter->mcc_lock); + +	wrb = wrb_from_mccq(adapter); +	if (!wrb) { +		status = -EBUSY; +		goto err; +	} + +	req = embedded_payload(wrb); + +	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, +			       sizeof(*req), wrb, NULL); + +	req->hdr.version = 1; +	req->hdr.domain = domain; + +	if (link_state == IFLA_VF_LINK_STATE_ENABLE) +		req->link_config |= 1; + +	if (link_state == IFLA_VF_LINK_STATE_AUTO) +		req->link_config |= 1 << PLINK_TRACK_SHIFT; 
+ +	status = be_mcc_notify_wait(adapter); +err: +	spin_unlock_bh(&adapter->mcc_lock); +	return status; +} +  int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, -			int wrb_payload_size, u16 *cmd_status, u16 *ext_status) +		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)  {  	struct be_adapter *adapter = netdev_priv(netdev_handle);  	struct be_mcc_wrb *wrb; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d026226db88..59b3c056f32 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   *   * This program is free software; you can redistribute it and/or @@ -50,7 +50,7 @@ struct be_mcc_wrb {  #define CQE_FLAGS_CONSUMED_MASK 	(1 << 27)  /* Completion Status */ -enum { +enum mcc_base_status {  	MCC_STATUS_SUCCESS = 0,  	MCC_STATUS_FAILED = 1,  	MCC_STATUS_ILLEGAL_REQUEST = 2, @@ -60,10 +60,25 @@ enum {  	MCC_STATUS_NOT_SUPPORTED = 66  }; -#define CQE_STATUS_COMPL_MASK		0xFFFF -#define CQE_STATUS_COMPL_SHIFT		0	/* bits 0 - 15 */ -#define CQE_STATUS_EXTD_MASK		0xFFFF -#define CQE_STATUS_EXTD_SHIFT		16	/* bits 16 - 31 */ +/* Additional status */ +enum mcc_addl_status { +	MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16, +	MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d, +	MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a +}; + +#define CQE_BASE_STATUS_MASK		0xFFFF +#define CQE_BASE_STATUS_SHIFT		0	/* bits 0 - 15 */ +#define CQE_ADDL_STATUS_MASK		0xFF +#define CQE_ADDL_STATUS_SHIFT		16	/* bits 16 - 31 */ + +#define base_status(status)		\ +		((enum mcc_base_status)	\ +			(status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0)) +#define addl_status(status)		\ +		((enum mcc_addl_status)	\ +			(status > 0 ? 
(status >> CQE_ADDL_STATUS_SHIFT) & \ +					CQE_ADDL_STATUS_MASK : 0))  struct be_mcc_compl {  	u32 status;		/* dword 0 */ @@ -72,13 +87,13 @@ struct be_mcc_compl {  	u32 flags;		/* dword 3 */  }; -/* When the async bit of mcc_compl is set, the last 4 bytes of - * mcc_compl is interpreted as follows: +/* When the async bit of mcc_compl flags is set, flags + * is interpreted as follows:   */ -#define ASYNC_TRAILER_EVENT_CODE_SHIFT	8	/* bits 8 - 15 */ -#define ASYNC_TRAILER_EVENT_CODE_MASK	0xFF -#define ASYNC_TRAILER_EVENT_TYPE_SHIFT	16 -#define ASYNC_TRAILER_EVENT_TYPE_MASK	0xFF +#define ASYNC_EVENT_CODE_SHIFT		8	/* bits 8 - 15 */ +#define ASYNC_EVENT_CODE_MASK		0xFF +#define ASYNC_EVENT_TYPE_SHIFT		16 +#define ASYNC_EVENT_TYPE_MASK		0xFF  #define ASYNC_EVENT_CODE_LINK_STATE	0x1  #define ASYNC_EVENT_CODE_GRP_5		0x5  #define ASYNC_EVENT_QOS_SPEED		0x1 @@ -87,10 +102,6 @@ struct be_mcc_compl {  #define ASYNC_EVENT_CODE_QNQ		0x6  #define ASYNC_DEBUG_EVENT_TYPE_QNQ	1 -struct be_async_event_trailer { -	u32 code; -}; -  enum {  	LINK_DOWN	= 0x0,  	LINK_UP		= 0x1 @@ -98,7 +109,7 @@ enum {  #define LINK_STATUS_MASK			0x1  #define LOGICAL_LINK_STATUS_MASK		0x2 -/* When the event code of an async trailer is link-state, the mcc_compl +/* When the event code of compl->flags is link-state, the mcc_compl   * must be interpreted as follows   */  struct be_async_event_link_state { @@ -108,10 +119,10 @@ struct be_async_event_link_state {  	u8 port_speed;  	u8 port_fault;  	u8 rsvd0[7]; -	struct be_async_event_trailer trailer; +	u32 flags;  } __packed; -/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED +/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED   * the mcc_compl must be interpreted as follows   */  struct be_async_event_grp5_qos_link_speed { @@ -119,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {  	u8 rsvd[5];  	u16 qos_link_speed;  	u32 event_tag; -	struct be_async_event_trailer trailer; +	u32 flags;  } __packed; -/* When the event code of an async trailer is GRP5 and event type is +/* When the event code of compl->flags is GRP5 and event type is   * CoS-Priority, the mcc_compl must be interpreted as follows   */  struct be_async_event_grp5_cos_priority { @@ -132,10 +143,10 @@ struct be_async_event_grp5_cos_priority {  	u8 valid;  	u8 rsvd0;  	u8 event_tag; -	struct be_async_event_trailer trailer; +	u32 flags;  } __packed; -/* When the event code of an async trailer is GRP5 and event type is +/* When the event code of compl->flags is GRP5 and event type is   * PVID state, the mcc_compl must be interpreted as follows   */  struct be_async_event_grp5_pvid_state { @@ -144,7 +155,7 @@ struct be_async_event_grp5_pvid_state {  	u16 tag;  	u32 event_tag;  	u32 rsvd1; -	struct be_async_event_trailer trailer; +	u32 flags;  } __packed;  /* async event indicating outer VLAN tag in QnQ */ @@ -154,7 +165,7 @@ struct be_async_event_qnq {  	u16 vlan_tag;  	u32 event_tag;  	u8 rsvd1[4]; -	struct be_async_event_trailer trailer; +	u32 flags;  } __packed;  struct be_mcc_mailbox { @@ -201,6 +212,7 @@ struct be_mcc_mailbox {  #define OPCODE_COMMON_GET_BEACON_STATE			70  #define OPCODE_COMMON_READ_TRANSRECV_DATA		73  #define OPCODE_COMMON_GET_PORT_NAME			77 +#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG		80  #define OPCODE_COMMON_SET_INTERRUPT_ENABLE		89  #define OPCODE_COMMON_SET_FN_PRIVILEGES			100  #define OPCODE_COMMON_GET_PHY_DETAILS			102 @@ -214,10 +226,12 @@ struct be_mcc_mailbox {  #define OPCODE_COMMON_GET_FUNC_CONFIG			160  #define 
OPCODE_COMMON_GET_PROFILE_CONFIG		164  #define OPCODE_COMMON_SET_PROFILE_CONFIG		165 +#define OPCODE_COMMON_GET_ACTIVE_PROFILE		167  #define OPCODE_COMMON_SET_HSW_CONFIG			153  #define OPCODE_COMMON_GET_FN_PRIVILEGES			170  #define OPCODE_COMMON_READ_OBJECT			171  #define OPCODE_COMMON_WRITE_OBJECT			172 +#define OPCODE_COMMON_MANAGE_IFACE_FILTERS		193  #define OPCODE_COMMON_GET_IFACE_LIST			194  #define OPCODE_COMMON_ENABLE_DISABLE_VF			196 @@ -253,8 +267,8 @@ struct be_cmd_resp_hdr {  	u8 opcode;		/* dword 0 */  	u8 subsystem;		/* dword 0 */  	u8 rsvd[2];		/* dword 0 */ -	u8 status;		/* dword 1 */ -	u8 add_status;		/* dword 1 */ +	u8 base_status;		/* dword 1 */ +	u8 addl_status;		/* dword 1 */  	u8 rsvd1[2];		/* dword 1 */  	u32 response_length;	/* dword 2 */  	u32 actual_resp_len;	/* dword 3 */ @@ -450,7 +464,7 @@ struct amap_mcc_context_be {  	u8 rsvd2[32];  } __packed; -struct amap_mcc_context_lancer { +struct amap_mcc_context_v1 {  	u8 async_cq_id[16];  	u8 ring_size[4];  	u8 rsvd0[12]; @@ -474,7 +488,7 @@ struct be_cmd_req_mcc_ext_create {  	u16 num_pages;  	u16 cq_id;  	u32 async_event_bitmap[1]; -	u8 context[sizeof(struct amap_mcc_context_be) / 8]; +	u8 context[sizeof(struct amap_mcc_context_v1) / 8];  	struct phys_addr pages[8];  } __packed; @@ -1055,14 +1069,16 @@ struct be_cmd_resp_get_flow_control {  } __packed;  /******************** Modify EQ Delay *******************/ +struct be_set_eqd { +	u32 eq_id; +	u32 phase; +	u32 delay_multiplier; +}; +  struct be_cmd_req_modify_eq_delay {  	struct be_cmd_req_hdr hdr;  	u32 num_eq; -	struct { -		u32 eq_id; -		u32 phase; -		u32 delay_multiplier; -	} delay[8]; +	struct be_set_eqd set_eqd[MAX_EVT_QS];  } __packed;  struct be_cmd_resp_modify_eq_delay { @@ -1075,7 +1091,7 @@ struct be_cmd_resp_modify_eq_delay {   * based on the skew/IPL.   
*/  #define RDMA_ENABLED				0x4 -#define FLEX10_MODE				0x400 +#define QNQ_MODE				0x400  #define VNIC_MODE				0x20000  #define UMC_ENABLED				0x1000000  struct be_cmd_req_query_fw_cfg { @@ -1179,7 +1195,8 @@ struct be_cmd_read_flash_crc {  	struct flashrom_params params;  	u8 crc[4];  	u8 rsvd[4]; -}; +} __packed; +  /**************** Lancer Firmware Flash ************/  struct amap_lancer_write_obj_context {  	u8 write_length[24]; @@ -1658,6 +1675,67 @@ struct be_erx_stats_v1 {  	u32 rsvd[4];  }; +struct be_port_rxf_stats_v2 { +	u32 rsvd0[10]; +	u32 roce_bytes_received_lsd; +	u32 roce_bytes_received_msd; +	u32 rsvd1[5]; +	u32 roce_frames_received; +	u32 rx_crc_errors; +	u32 rx_alignment_symbol_errors; +	u32 rx_pause_frames; +	u32 rx_priority_pause_frames; +	u32 rx_control_frames; +	u32 rx_in_range_errors; +	u32 rx_out_range_errors; +	u32 rx_frame_too_long; +	u32 rx_address_filtered; +	u32 rx_dropped_too_small; +	u32 rx_dropped_too_short; +	u32 rx_dropped_header_too_small; +	u32 rx_dropped_tcp_length; +	u32 rx_dropped_runt; +	u32 rsvd2[10]; +	u32 rx_ip_checksum_errs; +	u32 rx_tcp_checksum_errs; +	u32 rx_udp_checksum_errs; +	u32 rsvd3[7]; +	u32 rx_switched_unicast_packets; +	u32 rx_switched_multicast_packets; +	u32 rx_switched_broadcast_packets; +	u32 rsvd4[3]; +	u32 tx_pauseframes; +	u32 tx_priority_pauseframes; +	u32 tx_controlframes; +	u32 rsvd5[10]; +	u32 rxpp_fifo_overflow_drop; +	u32 rx_input_fifo_overflow_drop; +	u32 pmem_fifo_overflow_drop; +	u32 jabber_events; +	u32 rsvd6[3]; +	u32 rx_drops_payload_size; +	u32 rx_drops_clipped_header; +	u32 rx_drops_crc; +	u32 roce_drops_payload_len; +	u32 roce_drops_crc; +	u32 rsvd7[19]; +}; + +struct be_rxf_stats_v2 { +	struct be_port_rxf_stats_v2 port[4]; +	u32 rsvd0[2]; +	u32 rx_drops_no_pbuf; +	u32 rx_drops_no_txpb; +	u32 rx_drops_no_erx_descr; +	u32 rx_drops_no_tpre_descr; +	u32 rsvd1[6]; +	u32 rx_drops_too_many_frags; +	u32 rx_drops_invalid_ring; +	u32 forwarded_packets; +	u32 rx_drops_mtu; +	u32 rsvd2[35]; +}; +  struct be_hw_stats_v1 {  	struct be_rxf_stats_v1 rxf;  	u32 rsvd0[BE_TXP_SW_SZ]; @@ -1676,6 +1754,29 @@ struct be_cmd_resp_get_stats_v1 {  	struct be_hw_stats_v1 hw_stats;  }; +struct be_erx_stats_v2 { +	u32 rx_drops_no_fragments[136];     /* dwordS 0 to 135*/ +	u32 rsvd[3]; +}; + +struct be_hw_stats_v2 { +	struct be_rxf_stats_v2 rxf; +	u32 rsvd0[BE_TXP_SW_SZ]; +	struct be_erx_stats_v2 erx; +	struct be_pmem_stats pmem; +	u32 rsvd1[18]; +}; + +struct be_cmd_req_get_stats_v2 { +	struct be_cmd_req_hdr hdr; +	u8 rsvd[sizeof(struct be_hw_stats_v2)]; +}; + +struct be_cmd_resp_get_stats_v2 { +	struct be_cmd_resp_hdr hdr; +	struct be_hw_stats_v2 hw_stats; +}; +  /************** get fat capabilites *******************/  #define MAX_MODULES 27  #define MAX_MODES 4 @@ -1731,20 +1832,36 @@ struct be_cmd_req_set_ext_fat_caps {  #define NIC_RESOURCE_DESC_TYPE_V0		0x41  #define PCIE_RESOURCE_DESC_TYPE_V1		0x50  #define NIC_RESOURCE_DESC_TYPE_V1		0x51 +#define PORT_RESOURCE_DESC_TYPE_V1		0x55  #define MAX_RESOURCE_DESC			264 -/* QOS unit number */ -#define QUN					4 -/* Immediate */ -#define IMM					6 -/* No save */ -#define NOSV					7 +#define IMM_SHIFT				6	/* Immediate */ +#define NOSV_SHIFT				7	/* No save */  struct be_res_desc_hdr {  	u8 desc_type;  	u8 desc_len;  } __packed; +struct be_port_res_desc { +	struct be_res_desc_hdr hdr; +	u8 rsvd0; +	u8 flags; +	u8 link_num; +	u8 mc_type; +	u16 rsvd1; + +#define NV_TYPE_MASK				0x3	/* bits 0-1 */ +#define NV_TYPE_DISABLED			1 +#define NV_TYPE_VXLAN				3 +#define SOCVID_SHIFT				2	/* Strip outer 
vlan */ +#define RCVID_SHIFT				4	/* Report vlan */ +	u8 nv_flags; +	u8 rsvd2; +	__le16 nv_port;					/* vxlan/gre port */ +	u32 rsvd3[19]; +} __packed; +  struct be_pcie_res_desc {  	struct be_res_desc_hdr hdr;  	u8 rsvd0; @@ -1765,6 +1882,8 @@ struct be_pcie_res_desc {  struct be_nic_res_desc {  	struct be_res_desc_hdr hdr;  	u8 rsvd1; + +#define QUN_SHIFT				4 /* QoS is in absolute units */  	u8 flags;  	u8 vf_num;  	u8 rsvd2; @@ -1782,18 +1901,39 @@ struct be_nic_res_desc {  	u16 cq_count;  	u16 toe_conn_count;  	u16 eq_count; -	u32 rsvd5; +	u16 vlan_id; +	u16 iface_count;  	u32 cap_flags;  	u8 link_param; -	u8 rsvd6[3]; +	u8 rsvd6; +	u16 channel_id_param;  	u32 bw_min;  	u32 bw_max;  	u8 acpi_params;  	u8 wol_param;  	u16 rsvd7; -	u32 rsvd8[3]; +	u16 tunnel_iface_count; +	u16 direct_tenant_iface_count; +	u32 rsvd8[6];  } __packed; +/************ Multi-Channel type ***********/ +enum mc_type { +	MC_NONE = 0x01, +	UMC = 0x02, +	FLEX10 = 0x03, +	vNIC1 = 0x04, +	nPAR = 0x05, +	UFP = 0x06, +	vNIC2 = 0x07 +}; + +/* Is BE in a multi-channel mode */ +static inline bool be_is_mc(struct be_adapter *adapter) +{ +	return adapter->mc_type > MC_NONE; +} +  struct be_cmd_req_get_func_config {  	struct be_cmd_req_hdr hdr;  }; @@ -1822,13 +1962,24 @@ struct be_cmd_req_set_profile_config {  	struct be_cmd_req_hdr hdr;  	u32 rsvd;  	u32 desc_count; -	struct be_nic_res_desc nic_desc; +	u8 desc[RESOURCE_DESC_SIZE_V1];  };  struct be_cmd_resp_set_profile_config {  	struct be_cmd_resp_hdr hdr;  }; +struct be_cmd_req_get_active_profile { +	struct be_cmd_req_hdr hdr; +	u32 rsvd; +} __packed; + +struct be_cmd_resp_get_active_profile { +	struct be_cmd_resp_hdr hdr; +	u16 active_profile_id; +	u16 next_profile_id; +} __packed; +  struct be_cmd_enable_disable_vf {  	struct be_cmd_req_hdr hdr;  	u8 enable; @@ -1863,137 +2014,157 @@ struct be_cmd_resp_get_iface_list {  	struct be_if_desc if_desc;  }; -extern int be_pci_fnum_get(struct be_adapter *adapter); -extern int be_fw_wait_ready(struct be_adapter *adapter); -extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, -				 bool permanent, u32 if_handle, u32 pmac_id); -extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, -			u32 if_id, u32 *pmac_id, u32 domain); -extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, -			int pmac_id, u32 domain); -extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, -			    u32 en_flags, u32 *if_handle, u32 domain); -extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, -			u32 domain); -extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); -extern int be_cmd_cq_create(struct be_adapter *adapter, -			struct be_queue_info *cq, struct be_queue_info *eq, -			bool no_delay, int num_cqe_dma_coalesce); -extern int be_cmd_mccq_create(struct be_adapter *adapter, -			struct be_queue_info *mccq, -			struct be_queue_info *cq); -extern int be_cmd_txq_create(struct be_adapter *adapter, -			struct be_tx_obj *txo); -extern int be_cmd_rxq_create(struct be_adapter *adapter, -			struct be_queue_info *rxq, u16 cq_id, -			u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); -extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, -			int type); -extern int be_cmd_rxq_destroy(struct be_adapter *adapter, -			struct be_queue_info *q); -extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, -				    u8 *link_status, u32 dom); -extern int be_cmd_reset(struct be_adapter *adapter); -extern int 
be_cmd_get_stats(struct be_adapter *adapter, -			struct be_dma_mem *nonemb_cmd); -extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter, -			struct be_dma_mem *nonemb_cmd); -extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, -		char *fw_on_flash); - -extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd); -extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, -			u16 *vtag_array, u32 num, bool untagged, -			bool promiscuous); -extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); -extern int be_cmd_set_flow_control(struct be_adapter *adapter, -			u32 tx_fc, u32 rx_fc); -extern int be_cmd_get_flow_control(struct be_adapter *adapter, -			u32 *tx_fc, u32 *rx_fc); -extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, +/*************** Set logical link ********************/ +#define PLINK_TRACK_SHIFT	8 +struct be_cmd_req_set_ll_link { +	struct be_cmd_req_hdr hdr; +	u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */ +}; + +/************** Manage IFACE Filters *******************/ +#define OP_CONVERT_NORMAL_TO_TUNNEL		0 +#define OP_CONVERT_TUNNEL_TO_NORMAL		1 + +struct be_cmd_req_manage_iface_filters { +	struct be_cmd_req_hdr hdr; +	u8  op; +	u8  rsvd0; +	u8  flags; +	u8  rsvd1; +	u32 tunnel_iface_id; +	u32 target_iface_id; +	u8  mac[6]; +	u16 vlan_tag; +	u32 tenant_id; +	u32 filter_id; +	u32 cap_flags; +	u32 cap_control_flags; +} __packed; + +int be_pci_fnum_get(struct be_adapter *adapter); +int be_fw_wait_ready(struct be_adapter *adapter); +int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, +			  bool permanent, u32 if_handle, u32 pmac_id); +int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, +		    u32 *pmac_id, u32 domain); +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, +		    u32 domain); +int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, +		     u32 *if_handle, u32 domain); +int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain); +int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo); +int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, +		     struct be_queue_info *eq, bool no_delay, +		     int num_cqe_dma_coalesce); +int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq, +		       struct be_queue_info *cq); +int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo); +int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, +		      u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id); +int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, +		     int type); +int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q); +int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, +			     u8 *link_status, u32 dom); +int be_cmd_reset(struct be_adapter *adapter); +int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd); +int lancer_cmd_get_pport_stats(struct be_adapter *adapter, +			       struct be_dma_mem *nonemb_cmd); +int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, +		      char *fw_on_flash); +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); +int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, +		       u32 num); +int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); +int 
be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); +int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); +int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,  			u32 *function_mode, u32 *function_caps, u16 *asic_rev); -extern int be_cmd_reset_function(struct be_adapter *adapter); -extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, -			     u32 rss_hash_opts, u16 table_size); -extern int be_process_mcc(struct be_adapter *adapter); -extern int be_cmd_set_beacon_state(struct be_adapter *adapter, -			u8 port_num, u8 beacon, u8 status, u8 state); -extern int be_cmd_get_beacon_state(struct be_adapter *adapter, -			u8 port_num, u32 *state); -extern int be_cmd_write_flashrom(struct be_adapter *adapter, -			struct be_dma_mem *cmd, u32 flash_oper, -			u32 flash_opcode, u32 buf_size); -extern int lancer_cmd_write_object(struct be_adapter *adapter, -				   struct be_dma_mem *cmd, -				   u32 data_size, u32 data_offset, -				   const char *obj_name, -				   u32 *data_written, u8 *change_status, -				   u8 *addn_status); +int be_cmd_reset_function(struct be_adapter *adapter); +int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, +		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey); +int be_process_mcc(struct be_adapter *adapter); +int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, +			    u8 status, u8 state); +int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, +			    u32 *state); +int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, +			  u32 flash_oper, u32 flash_opcode, u32 buf_size); +int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, +			    u32 data_size, u32 data_offset, +			    const char *obj_name, u32 *data_written, +			    u8 *change_status, u8 *addn_status);  int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, -		u32 data_size, u32 data_offset, const char *obj_name, -		u32 *data_read, u32 *eof, u8 *addn_status); +			   u32 data_size, u32 data_offset, const char *obj_name, +			   u32 *data_read, u32 *eof, u8 *addn_status);  int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, -				int offset); -extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, -				struct be_dma_mem *nonemb_cmd); -extern int be_cmd_fw_init(struct be_adapter *adapter); -extern int be_cmd_fw_clean(struct be_adapter *adapter); -extern void be_async_mcc_enable(struct be_adapter *adapter); -extern void be_async_mcc_disable(struct be_adapter *adapter); -extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, -				u32 loopback_type, u32 pkt_size, -				u32 num_pkts, u64 pattern); -extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, -			u32 byte_cnt, struct be_dma_mem *cmd); -extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, -				struct be_dma_mem *nonemb_cmd); -extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, -				u8 loopback_type, u8 enable); -extern int be_cmd_get_phy_info(struct be_adapter *adapter); -extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); -extern void be_detect_error(struct be_adapter *adapter); -extern int be_cmd_get_die_temperature(struct be_adapter *adapter); -extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); -extern int be_cmd_req_native_mode(struct be_adapter *adapter); -extern int be_cmd_get_reg_len(struct be_adapter *adapter, 
u32 *log_size); -extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); -extern int be_cmd_get_fn_privileges(struct be_adapter *adapter, -				    u32 *privilege, u32 domain); -extern int be_cmd_set_fn_privileges(struct be_adapter *adapter, -				    u32 privileges, u32 vf_num); -extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, -				    bool *pmac_id_active, u32 *pmac_id, -				    u8 domain); -extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, -				 u8 *mac); -extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); -extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, -						u8 mac_count, u32 domain); -extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, -			  u32 dom); -extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, -				 u32 domain, u16 intf_id, u16 hsw_mode); -extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, -				 u32 domain, u16 intf_id, u8 *mode); -extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); -extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, -					  struct be_dma_mem *cmd); -extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, -					  struct be_dma_mem *cmd, -					  struct be_fat_conf_params *cfgs); -extern int lancer_wait_ready(struct be_adapter *adapter); -extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); -extern int lancer_initiate_dump(struct be_adapter *adapter); -extern bool dump_present(struct be_adapter *adapter); -extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); -extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); +			  u16 optype, int offset); +int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, +			    struct be_dma_mem *nonemb_cmd); +int be_cmd_fw_init(struct be_adapter *adapter); +int be_cmd_fw_clean(struct be_adapter *adapter); +void be_async_mcc_enable(struct be_adapter *adapter); +void be_async_mcc_disable(struct be_adapter *adapter); +int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, +			 u32 loopback_type, u32 pkt_size, u32 num_pkts, +			 u64 pattern); +int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt, +			struct be_dma_mem *cmd); +int be_cmd_get_seeprom_data(struct be_adapter *adapter, +			    struct be_dma_mem *nonemb_cmd); +int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, +			u8 loopback_type, u8 enable); +int be_cmd_get_phy_info(struct be_adapter *adapter); +int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, +		      u16 link_speed, u8 domain); +void be_detect_error(struct be_adapter *adapter); +int be_cmd_get_die_temperature(struct be_adapter *adapter); +int be_cmd_get_cntl_attributes(struct be_adapter *adapter); +int be_cmd_req_native_mode(struct be_adapter *adapter); +int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); +void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); +int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, +			     u32 domain); +int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, +			     u32 vf_num); +int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, +			     bool *pmac_id_active, u32 *pmac_id, +			     u32 if_handle, u8 domain); +int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac, +			  u32 if_handle, bool active, u32 domain); +int 
be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac); +int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, +			u32 domain); +int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom); +int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, +			  u16 intf_id, u16 hsw_mode); +int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, +			  u16 intf_id, u8 *mode); +int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter); +int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level); +int be_cmd_get_fw_log_level(struct be_adapter *adapter); +int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, +				   struct be_dma_mem *cmd); +int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, +				   struct be_dma_mem *cmd, +				   struct be_fat_conf_params *cfgs); +int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask); +int lancer_initiate_dump(struct be_adapter *adapter); +bool dump_present(struct be_adapter *adapter); +int lancer_test_and_set_rdy_state(struct be_adapter *adapter); +int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);  int be_cmd_get_func_config(struct be_adapter *adapter,  			   struct be_resources *res);  int be_cmd_get_profile_config(struct be_adapter *adapter,  			      struct be_resources *res, u8 domain); -extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, -				     u8 domain); -extern int be_cmd_get_if_id(struct be_adapter *adapter, -			    struct be_vf_cfg *vf_cfg, int vf_num); -extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); -extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); +int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, +			      int size, u8 version, u8 domain); +int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); +int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, +		     int vf_num); +int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); +int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); +int be_cmd_set_logical_link_config(struct be_adapter *adapter, +					  int link_state, u8 domain); +int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); +int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b440a1fac77..e2da4d20dd3 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   
*   * This program is free software; you can redistribute it and/or @@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {  	{DRVSTAT_INFO(rx_drops_mtu)},  	/* Number of packets dropped due to random early drop function */  	{DRVSTAT_INFO(eth_red_drops)}, -	{DRVSTAT_INFO(be_on_die_temperature)} +	{DRVSTAT_INFO(be_on_die_temperature)}, +	{DRVSTAT_INFO(rx_roce_bytes_lsd)}, +	{DRVSTAT_INFO(rx_roce_bytes_msd)}, +	{DRVSTAT_INFO(rx_roce_frames)}, +	{DRVSTAT_INFO(roce_drops_payload_len)}, +	{DRVSTAT_INFO(roce_drops_crc)}  };  #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) @@ -127,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {  	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */  	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */  	{DRVSTAT_RX_INFO(rx_compl)}, +	{DRVSTAT_RX_INFO(rx_compl_err)},  	{DRVSTAT_RX_INFO(rx_mcast_pkts)},  	/* Number of page allocation failures while posting receive buffers  	 * to HW. @@ -155,7 +161,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {  	/* Number of times the TX queue was stopped due to lack  	 * of spaces in the TXQ.  	 */ -	{DRVSTAT_TX_INFO(tx_stops)} +	{DRVSTAT_TX_INFO(tx_stops)}, +	/* Pkts dropped in the driver's transmit path */ +	{DRVSTAT_TX_INFO(tx_drv_drops)}  };  #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) @@ -174,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {  #define BE_NO_LOOPBACK 0xff  static void be_get_drvinfo(struct net_device *netdev, -				struct ethtool_drvinfo *drvinfo) +			   struct ethtool_drvinfo *drvinfo)  {  	struct be_adapter *adapter = netdev_priv(netdev); @@ -194,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,  	drvinfo->eedump_len = 0;  } -static u32 -lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) +static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)  {  	u32 data_read = 0, eof;  	u8 addn_status; @@ -205,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)  	memset(&data_len_cmd, 0, sizeof(data_len_cmd));  	/* data_offset and data_size should be 0 to get reg len */  	status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, -				file_name, &data_read, &eof, &addn_status); +					file_name, &data_read, &eof, +					&addn_status);  	return data_read;  } -static int -lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, -		u32 buf_len, void *buf) +static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, +				u32 buf_len, void *buf)  {  	struct be_dma_mem read_cmd;  	u32 read_len = 0, total_read_len = 0, chunk_size; @@ -222,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,  	read_cmd.size = LANCER_READ_FILE_CHUNK;  	read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, -			&read_cmd.dma); +					   &read_cmd.dma);  	if (!read_cmd.va) {  		dev_err(&adapter->pdev->dev, -				"Memory allocation failure while reading dump\n"); +			"Memory allocation failure while reading dump\n");  		return -ENOMEM;  	} @@ -235,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,  				LANCER_READ_FILE_CHUNK);  		chunk_size = ALIGN(chunk_size, 4);  		status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, -				total_read_len, file_name, &read_len, -				&eof, &addn_status); +						total_read_len, file_name, +						&read_len, &eof, &addn_status);  		if (!status) {  			memcpy(buf + total_read_len, read_cmd.va, read_len);  			total_read_len += read_len; @@ 
-247,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,  		}  	}  	pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, -			read_cmd.dma); +			    read_cmd.dma);  	return status;  } -static int -be_get_reg_len(struct net_device *netdev) +static int be_get_reg_len(struct net_device *netdev)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	u32 log_size = 0; @@ -264,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)  	if (be_physfn(adapter)) {  		if (lancer_chip(adapter))  			log_size = lancer_cmd_get_file_len(adapter, -					LANCER_FW_DUMP_FILE); +							   LANCER_FW_DUMP_FILE);  		else  			be_cmd_get_reg_len(adapter, &log_size);  	} @@ -280,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)  		memset(buf, 0, regs->len);  		if (lancer_chip(adapter))  			lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, -					regs->len, buf); +					     regs->len, buf);  		else  			be_cmd_get_regs(adapter, regs->len, buf);  	} @@ -290,19 +296,19 @@ static int be_get_coalesce(struct net_device *netdev,  			   struct ethtool_coalesce *et)  {  	struct be_adapter *adapter = netdev_priv(netdev); -	struct be_eq_obj *eqo = &adapter->eq_obj[0]; +	struct be_aic_obj *aic = &adapter->aic_obj[0]; -	et->rx_coalesce_usecs = eqo->cur_eqd; -	et->rx_coalesce_usecs_high = eqo->max_eqd; -	et->rx_coalesce_usecs_low = eqo->min_eqd; +	et->rx_coalesce_usecs = aic->prev_eqd; +	et->rx_coalesce_usecs_high = aic->max_eqd; +	et->rx_coalesce_usecs_low = aic->min_eqd; -	et->tx_coalesce_usecs = eqo->cur_eqd; -	et->tx_coalesce_usecs_high = eqo->max_eqd; -	et->tx_coalesce_usecs_low = eqo->min_eqd; +	et->tx_coalesce_usecs = aic->prev_eqd; +	et->tx_coalesce_usecs_high = aic->max_eqd; +	et->tx_coalesce_usecs_low = aic->min_eqd; -	et->use_adaptive_rx_coalesce = eqo->enable_aic; -	et->use_adaptive_tx_coalesce = eqo->enable_aic; +	et->use_adaptive_rx_coalesce = aic->enable; +	et->use_adaptive_tx_coalesce = aic->enable;  	return 0;  } @@ -314,22 +320,24 @@ static int be_set_coalesce(struct net_device *netdev,  			   struct ethtool_coalesce *et)  {  	struct be_adapter *adapter = netdev_priv(netdev); +	struct be_aic_obj *aic = &adapter->aic_obj[0];  	struct be_eq_obj *eqo;  	int i;  	for_all_evt_queues(adapter, eqo, i) { -		eqo->enable_aic = et->use_adaptive_rx_coalesce; -		eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); -		eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd); -		eqo->eqd = et->rx_coalesce_usecs; +		aic->enable = et->use_adaptive_rx_coalesce; +		aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); +		aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd); +		aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd); +		aic->et_eqd = max(aic->et_eqd, aic->min_eqd); +		aic++;  	}  	return 0;  } -static void -be_get_ethtool_stats(struct net_device *netdev, -		struct ethtool_stats *stats, uint64_t *data) +static void be_get_ethtool_stats(struct net_device *netdev, +				 struct ethtool_stats *stats, uint64_t *data)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	struct be_rx_obj *rxo; @@ -347,10 +355,10 @@ be_get_ethtool_stats(struct net_device *netdev,  		struct be_rx_stats *stats = rx_stats(rxo);  		do { -			start = u64_stats_fetch_begin_bh(&stats->sync); +			start = u64_stats_fetch_begin_irq(&stats->sync);  			data[base] = stats->rx_bytes;  			data[base + 1] = stats->rx_pkts; -		} while (u64_stats_fetch_retry_bh(&stats->sync, start)); +		} while (u64_stats_fetch_retry_irq(&stats->sync, start));  		for (i = 2; i < 
ETHTOOL_RXSTATS_NUM; i++) {  			p = (u8 *)stats + et_rx_stats[i].offset; @@ -363,26 +371,25 @@ be_get_ethtool_stats(struct net_device *netdev,  		struct be_tx_stats *stats = tx_stats(txo);  		do { -			start = u64_stats_fetch_begin_bh(&stats->sync_compl); +			start = u64_stats_fetch_begin_irq(&stats->sync_compl);  			data[base] = stats->tx_compl; -		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); +		} while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));  		do { -			start = u64_stats_fetch_begin_bh(&stats->sync); +			start = u64_stats_fetch_begin_irq(&stats->sync);  			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {  				p = (u8 *)stats + et_tx_stats[i].offset;  				data[base + i] =  					(et_tx_stats[i].size == sizeof(u64)) ?  						*(u64 *)p : *(u32 *)p;  			} -		} while (u64_stats_fetch_retry_bh(&stats->sync, start)); +		} while (u64_stats_fetch_retry_irq(&stats->sync, start));  		base += ETHTOOL_TXSTATS_NUM;  	}  } -static void -be_get_stat_strings(struct net_device *netdev, uint32_t stringset, -		uint8_t *data) +static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset, +				uint8_t *data)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	int i, j; @@ -632,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)  	adapter->rx_fc = ecmd->rx_pause;  	status = be_cmd_set_flow_control(adapter, -					adapter->tx_fc, adapter->rx_fc); +					 adapter->tx_fc, adapter->rx_fc);  	if (status)  		dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");  	return status;  } -static int -be_set_phys_id(struct net_device *netdev, -	       enum ethtool_phys_id_state state) +static int be_set_phys_id(struct net_device *netdev, +			  enum ethtool_phys_id_state state)  {  	struct be_adapter *adapter = netdev_priv(netdev); @@ -698,43 +704,41 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)  	return status;  } -static void -be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)  {  	struct be_adapter *adapter = netdev_priv(netdev); -	if (be_is_wol_supported(adapter)) { +	if (adapter->wol_cap & BE_WOL_CAP) {  		wol->supported |= WAKE_MAGIC; -		if (adapter->wol) +		if (adapter->wol_en)  			wol->wolopts |= WAKE_MAGIC; -	} else +	} else {  		wol->wolopts = 0; +	}  	memset(&wol->sopass, 0, sizeof(wol->sopass));  } -static int -be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	if (wol->wolopts & ~WAKE_MAGIC)  		return -EOPNOTSUPP; -	if (!be_is_wol_supported(adapter)) { +	if (!(adapter->wol_cap & BE_WOL_CAP)) {  		dev_warn(&adapter->pdev->dev, "WOL not supported\n");  		return -EOPNOTSUPP;  	}  	if (wol->wolopts & WAKE_MAGIC) -		adapter->wol = true; +		adapter->wol_en = true;  	else -		adapter->wol = false; +		adapter->wol_en = false;  	return 0;  } -static int -be_test_ddr_dma(struct be_adapter *adapter) +static int be_test_ddr_dma(struct be_adapter *adapter)  {  	int ret, i;  	struct be_dma_mem ddrdma_cmd; @@ -750,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)  	for (i = 0; i < 2; i++) {  		ret = be_cmd_ddr_dma_test(adapter, pattern[i], -					4096, &ddrdma_cmd); +					  4096, &ddrdma_cmd);  		if (ret != 0)  			goto err;  	} @@ -762,20 +766,17 @@ err:  }  static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, -				u64 *status) +			   
 u64 *status)  { -	be_cmd_set_loopback(adapter, adapter->hba_port_num, -				loopback_type, 1); +	be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);  	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num, -				loopback_type, 1500, -				2, 0xabc); -	be_cmd_set_loopback(adapter, adapter->hba_port_num, -				BE_NO_LOOPBACK, 1); +				       loopback_type, 1500, 2, 0xabc); +	be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);  	return *status;  } -static void -be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) +static void be_self_test(struct net_device *netdev, struct ethtool_test *test, +			 u64 *data)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	int status; @@ -790,17 +791,17 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)  	memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);  	if (test->flags & ETH_TEST_FL_OFFLINE) { -		if (be_loopback_test(adapter, BE_MAC_LOOPBACK, -						&data[0]) != 0) { +		if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)  			test->flags |= ETH_TEST_FL_FAILED; -		} -		if (be_loopback_test(adapter, BE_PHY_LOOPBACK, -						&data[1]) != 0) { -			test->flags |= ETH_TEST_FL_FAILED; -		} -		if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, -						&data[2]) != 0) { + +		if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)  			test->flags |= ETH_TEST_FL_FAILED; + +		if (test->flags & ETH_TEST_FL_EXTERNAL_LB) { +			if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, +					     &data[2]) != 0) +				test->flags |= ETH_TEST_FL_FAILED; +			test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;  		}  	} @@ -819,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)  	}  } -static int -be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) +static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	return be_load_fw(adapter, efl->data);  } -static int -be_get_eeprom_len(struct net_device *netdev) +static int be_get_eeprom_len(struct net_device *netdev)  {  	struct be_adapter *adapter = netdev_priv(netdev); @@ -838,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)  	if (lancer_chip(adapter)) {  		if (be_physfn(adapter))  			return lancer_cmd_get_file_len(adapter, -					LANCER_VPD_PF_FILE); +						       LANCER_VPD_PF_FILE);  		else  			return lancer_cmd_get_file_len(adapter, -					LANCER_VPD_VF_FILE); +						       LANCER_VPD_VF_FILE);  	} else {  		return BE_READ_SEEPROM_LEN;  	}  } -static int -be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, -			uint8_t *data) +static int be_read_eeprom(struct net_device *netdev, +			  struct ethtool_eeprom *eeprom, uint8_t *data)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	struct be_dma_mem eeprom_cmd; @@ -862,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,  	if (lancer_chip(adapter)) {  		if (be_physfn(adapter))  			return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, -					eeprom->len, data); +						    eeprom->len, data);  		else  			return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, -					eeprom->len, data); +						    eeprom->len, data);  	}  	eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); @@ -894,73 +892,21 @@ static u32 be_get_msg_level(struct net_device *netdev)  {  	struct be_adapter *adapter = netdev_priv(netdev); -	if (lancer_chip(adapter)) { -		dev_err(&adapter->pdev->dev, 
"Operation not supported\n"); -		return -EOPNOTSUPP; -	} -  	return adapter->msg_enable;  } -static void be_set_fw_log_level(struct be_adapter *adapter, u32 level) -{ -	struct be_dma_mem extfat_cmd; -	struct be_fat_conf_params *cfgs; -	int status; -	int i, j; - -	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); -	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); -	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, -					     &extfat_cmd.dma); -	if (!extfat_cmd.va) { -		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", -			__func__); -		goto err; -	} -	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); -	if (!status) { -		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + -					sizeof(struct be_cmd_resp_hdr)); -		for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { -			u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); -			for (j = 0; j < num_modes; j++) { -				if (cfgs->module[i].trace_lvl[j].mode == -								MODE_UART) -					cfgs->module[i].trace_lvl[j].dbg_lvl = -							cpu_to_le32(level); -			} -		} -		status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, -							cfgs); -		if (status) -			dev_err(&adapter->pdev->dev, -				"Message level set failed\n"); -	} else { -		dev_err(&adapter->pdev->dev, "Message level get failed\n"); -	} - -	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, -			    extfat_cmd.dma); -err: -	return; -} -  static void be_set_msg_level(struct net_device *netdev, u32 level)  {  	struct be_adapter *adapter = netdev_priv(netdev); -	if (lancer_chip(adapter)) { -		dev_err(&adapter->pdev->dev, "Operation not supported\n"); -		return; -	} -  	if (adapter->msg_enable == level)  		return;  	if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW)) -		be_set_fw_log_level(adapter, level & NETIF_MSG_HW ? -				    FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); +		if (BEx_chip(adapter)) +			be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ? 
+						FW_LOG_LEVEL_DEFAULT : +						FW_LOG_LEVEL_FATAL);  	adapter->msg_enable = level;  	return; @@ -972,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)  	switch (flow_type) {  	case TCP_V4_FLOW: -		if (adapter->rss_flags & RSS_ENABLE_IPV4) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)  			data |= RXH_IP_DST | RXH_IP_SRC; -		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)  			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;  		break;  	case UDP_V4_FLOW: -		if (adapter->rss_flags & RSS_ENABLE_IPV4) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)  			data |= RXH_IP_DST | RXH_IP_SRC; -		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)  			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;  		break;  	case TCP_V6_FLOW: -		if (adapter->rss_flags & RSS_ENABLE_IPV6) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)  			data |= RXH_IP_DST | RXH_IP_SRC; -		if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)  			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;  		break;  	case UDP_V6_FLOW: -		if (adapter->rss_flags & RSS_ENABLE_IPV6) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)  			data |= RXH_IP_DST | RXH_IP_SRC; -		if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6) +		if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)  			data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;  		break;  	} @@ -1001,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)  }  static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, -		      u32 *rule_locs) +			u32 *rule_locs)  {  	struct be_adapter *adapter = netdev_priv(netdev); @@ -1031,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,  	struct be_rx_obj *rxo;  	int status = 0, i, j;  	u8 rsstable[128]; -	u32 rss_flags = adapter->rss_flags; +	u32 rss_flags = adapter->rss_info.rss_flags;  	if (cmd->data != L3_RSS_FLAGS &&  	    cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) @@ -1078,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,  		return -EINVAL;  	} -	if (rss_flags == adapter->rss_flags) +	if (rss_flags == adapter->rss_info.rss_flags)  		return status;  	if (be_multi_rxq(adapter)) { @@ -1090,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,  			}  		}  	} -	status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128); + +	status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable, +				   rss_flags, 128, adapter->rss_info.rss_hkey);  	if (!status) -		adapter->rss_flags = rss_flags; +		adapter->rss_info.rss_flags = rss_flags;  	return status;  } @@ -1142,6 +1090,69 @@ static int be_set_channels(struct net_device  *netdev,  	return be_update_queues(adapter);  } +static u32 be_get_rxfh_indir_size(struct net_device *netdev) +{ +	return RSS_INDIR_TABLE_LEN; +} + +static u32 be_get_rxfh_key_size(struct net_device *netdev) +{ +	return RSS_HASH_KEY_LEN; +} + +static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey) +{ +	struct be_adapter *adapter = netdev_priv(netdev); +	int i; +	struct rss_info *rss = &adapter->rss_info; + +	if (indir) { +		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) +			indir[i] = rss->rss_queue[i]; +	} + +	if (hkey) +		memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN); + +	return 0; +} + +static int be_set_rxfh(struct net_device *netdev, const u32 *indir, +		       const u8 *hkey) +{ +	int rc = 0, i, j; +	struct be_adapter *adapter = 
netdev_priv(netdev); +	u8 rsstable[RSS_INDIR_TABLE_LEN]; + +	if (indir) { +		struct be_rx_obj *rxo; +		for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) { +			j = indir[i]; +			rxo = &adapter->rx_obj[j]; +			rsstable[i] = rxo->rss_id; +			adapter->rss_info.rss_queue[i] = j; +		} +	} else { +		memcpy(rsstable, adapter->rss_info.rsstable, +		       RSS_INDIR_TABLE_LEN); +	} + +	if (!hkey) +		hkey =  adapter->rss_info.rss_hkey; + +	rc = be_cmd_rss_config(adapter, rsstable, +			adapter->rss_info.rss_flags, +			RSS_INDIR_TABLE_LEN, hkey); +	if (rc) { +		adapter->rss_info.rss_flags = RSS_ENABLE_NONE; +		return -EIO; +	} +	memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN); +	memcpy(adapter->rss_info.rsstable, rsstable, +	       RSS_INDIR_TABLE_LEN); +	return 0; +} +  const struct ethtool_ops be_ethtool_ops = {  	.get_settings = be_get_settings,  	.get_drvinfo = be_get_drvinfo, @@ -1168,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {  	.self_test = be_self_test,  	.get_rxnfc = be_get_rxnfc,  	.set_rxnfc = be_set_rxnfc, +	.get_rxfh_indir_size = be_get_rxfh_indir_size, +	.get_rxfh_key_size = be_get_rxfh_key_size, +	.get_rxfh = be_get_rxfh, +	.set_rxfh = be_set_rxfh,  	.get_channels = be_get_channels,  	.set_channels = be_set_channels  }; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 3e216212160..8840c64aaec 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   *   * This program is free software; you can redistribute it and/or @@ -64,6 +64,9 @@  #define SLIPORT_ERROR_NO_RESOURCE1	0x2  #define SLIPORT_ERROR_NO_RESOURCE2	0x9 +#define SLIPORT_ERROR_FW_RESET1		0x2 +#define SLIPORT_ERROR_FW_RESET2		0x0 +  /********* Memory BAR register ************/  #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 	0xfc  /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt @@ -185,10 +188,14 @@  #define OPTYPE_FCOE_FW_ACTIVE		10  #define OPTYPE_FCOE_FW_BACKUP		11  #define OPTYPE_NCSI_FW			13 +#define OPTYPE_REDBOOT_DIR		18 +#define OPTYPE_REDBOOT_CONFIG		19 +#define OPTYPE_SH_PHY_FW		21 +#define OPTYPE_FLASHISM_JUMPVECTOR	22 +#define OPTYPE_UFI_DIR			23  #define OPTYPE_PHY_FW			99  #define TN_8022				13 -#define ILLEGAL_IOCTL_REQ		2  #define FLASHROM_OPER_PHY_FLASH		9  #define FLASHROM_OPER_PHY_SAVE		10  #define FLASHROM_OPER_FLASH		1 @@ -247,6 +254,9 @@  #define IMAGE_FIRMWARE_BACKUP_FCoE	178  #define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179  #define IMAGE_FIRMWARE_PHY		192 +#define IMAGE_REDBOOT_DIR		208 +#define IMAGE_REDBOOT_CONFIG		209 +#define IMAGE_UFI_DIR			210  #define IMAGE_BOOT_CODE			224  /************* Rx Packet Type Encoding **************/ @@ -365,7 +375,7 @@ struct amap_eth_rx_compl_v0 {  	u8 numfrags[3];		/* dword 1 */  	u8 rss_flush;		/* dword 2 */  	u8 cast_enc[2];		/* dword 2 */ -	u8 vtm;			/* dword 2 */ +	u8 qnq;			/* dword 2 */  	u8 rss_bank;		/* dword 2 */  	u8 rsvd1[23];		/* dword 2 */  	u8 lro_pkt;		/* dword 2 */ @@ -398,13 +408,14 @@ struct amap_eth_rx_compl_v1 {  	u8 numfrags[3];		/* dword 1 */  	u8 rss_flush;		/* dword 2 */  	u8 cast_enc[2];		/* dword 2 */ -	u8 vtm;			/* dword 2 */ +	u8 qnq;			/* dword 2 */  	u8 rss_bank;		/* dword 2 */  	u8 port[2];		/* dword 2 */  	u8 vntagp;		/* dword 2 */  	u8 header_len[8];	/* dword 2 */  	u8 header_split[2];	/* dword 2 */ -	u8 rsvd1[13];		/* dword 2 */ +	u8 rsvd1[12];		/* dword 2 */ +	u8 tunneled;  	u8 
valid;		/* dword 2 */  	u8 rsshash[32];		/* dword 3 */  } __packed; @@ -530,7 +541,8 @@ struct flash_section_entry {  	u32 image_size;  	u32 cksum;  	u32 entry_point; -	u32 rsvd0; +	u16 optype; +	u16 rsvd0;  	u32 rsvd1;  	u8 ver_data[32];  } __packed; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3224d28cdad..1e187fb760f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   *   * This program is free software; you can redistribute it and/or @@ -22,6 +22,8 @@  #include <asm/div64.h>  #include <linux/aer.h>  #include <linux/if_bridge.h> +#include <net/busy_poll.h> +#include <net/vxlan.h>  MODULE_VERSION(DRV_VER);  MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -120,12 +122,6 @@ static const char * const ue_status_hi_desc[] = {  	"Unknown"  }; -/* Is BE in a multi-channel mode */ -static inline bool be_is_mc(struct be_adapter *adapter) { -	return (adapter->function_mode & FLEX10_MODE || -		adapter->function_mode & VNIC_MODE || -		adapter->function_mode & UMC_ENABLED); -}  static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)  { @@ -138,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)  }  static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, -		u16 len, u16 entry_size) +			  u16 len, u16 entry_size)  {  	struct be_dma_mem *mem = &q->dma_mem; @@ -158,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)  	u32 reg, enabled;  	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, -				®); +			      ®);  	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;  	if (!enabled && enable) @@ -169,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)  		return;  	pci_write_config_dword(adapter->pdev, -			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); +			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);  }  static void be_intr_set(struct be_adapter *adapter, bool enable) @@ -210,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,  }  static void be_eq_notify(struct be_adapter *adapter, u16 qid, -		bool arm, bool clear_int, u16 num_popped) +			 bool arm, bool clear_int, u16 num_popped)  {  	u32 val = 0;  	val |= qid & DB_EQ_RING_ID_MASK; -	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << -			DB_EQ_RING_ID_EXT_MASK_SHIFT); +	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);  	if (adapter->eeh_error)  		return; @@ -257,6 +252,12 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)  	if (!is_valid_ether_addr(addr->sa_data))  		return -EADDRNOTAVAIL; +	/* Proceed further only if, User provided MAC is different +	 * from active MAC +	 */ +	if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) +		return 0; +  	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT  	 * privilege or if PF did not provision the new MAC address.  	 
* On BE3, this cmd will always fail if the VF doesn't have the @@ -279,14 +280,15 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)  	/* Decide if the new MAC is successfully activated only after  	 * querying the FW  	 */ -	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac); +	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac, +				       adapter->if_handle, true, 0);  	if (status)  		goto err;  	/* The MAC change did not happen, either due to lack of privilege  	 * or PF didn't pre-provision.  	 */ -	if (memcmp(addr->sa_data, mac, ETH_ALEN)) { +	if (!ether_addr_equal(addr->sa_data, mac)) {  		status = -EPERM;  		goto err;  	} @@ -306,10 +308,14 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)  		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;  		return &cmd->hw_stats; -	} else  { +	} else if (BE3_chip(adapter)) {  		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;  		return &cmd->hw_stats; +	} else { +		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va; + +		return &cmd->hw_stats;  	}  } @@ -320,10 +326,14 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)  		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);  		return &hw_stats->erx; -	} else { +	} else if (BE3_chip(adapter)) {  		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);  		return &hw_stats->erx; +	} else { +		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); + +		return &hw_stats->erx;  	}  } @@ -422,12 +432,65 @@ static void populate_be_v1_stats(struct be_adapter *adapter)  	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;  } +static void populate_be_v2_stats(struct be_adapter *adapter) +{ +	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); +	struct be_pmem_stats *pmem_sts = &hw_stats->pmem; +	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf; +	struct be_port_rxf_stats_v2 *port_stats = +					&rxf_stats->port[adapter->port_num]; +	struct be_drv_stats *drvs = &adapter->drv_stats; + +	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); +	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; +	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; +	drvs->rx_pause_frames = port_stats->rx_pause_frames; +	drvs->rx_crc_errors = port_stats->rx_crc_errors; +	drvs->rx_control_frames = port_stats->rx_control_frames; +	drvs->rx_in_range_errors = port_stats->rx_in_range_errors; +	drvs->rx_frame_too_long = port_stats->rx_frame_too_long; +	drvs->rx_dropped_runt = port_stats->rx_dropped_runt; +	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; +	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; +	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; +	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; +	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; +	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; +	drvs->rx_out_range_errors = port_stats->rx_out_range_errors; +	drvs->rx_dropped_header_too_small = +		port_stats->rx_dropped_header_too_small; +	drvs->rx_input_fifo_overflow_drop = +		port_stats->rx_input_fifo_overflow_drop; +	drvs->rx_address_filtered = port_stats->rx_address_filtered; +	drvs->rx_alignment_symbol_errors = +		port_stats->rx_alignment_symbol_errors; +	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; +	drvs->tx_pauseframes = port_stats->tx_pauseframes; +	drvs->tx_controlframes = port_stats->tx_controlframes; +	drvs->tx_priority_pauseframes = 
port_stats->tx_priority_pauseframes; +	drvs->jabber_events = port_stats->jabber_events; +	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; +	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; +	drvs->forwarded_packets = rxf_stats->forwarded_packets; +	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; +	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; +	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; +	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; +	if (be_roce_supported(adapter)) { +		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; +		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; +		drvs->rx_roce_frames = port_stats->roce_frames_received; +		drvs->roce_drops_crc = port_stats->roce_drops_crc; +		drvs->roce_drops_payload_len = +			port_stats->roce_drops_payload_len; +	} +} +  static void populate_lancer_stats(struct be_adapter *adapter)  {  	struct be_drv_stats *drvs = &adapter->drv_stats; -	struct lancer_pport_stats *pport_stats = -					pport_stats_from_cmd(adapter); +	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);  	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));  	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; @@ -474,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)  }  static void populate_erx_stats(struct be_adapter *adapter, -			struct be_rx_obj *rxo, -			u32 erx_stat) +			       struct be_rx_obj *rxo, u32 erx_stat)  {  	if (!BEx_chip(adapter))  		rx_stats(rxo)->rx_drops_no_frags = erx_stat; @@ -489,7 +551,7 @@ static void populate_erx_stats(struct be_adapter *adapter,  void be_parse_stats(struct be_adapter *adapter)  { -	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter); +	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);  	struct be_rx_obj *rxo;  	int i;  	u32 erx_stat; @@ -499,11 +561,13 @@ void be_parse_stats(struct be_adapter *adapter)  	} else {  		if (BE2_chip(adapter))  			populate_be_v0_stats(adapter); -		else -			/* for BE3 and Skyhawk */ +		else if (BE3_chip(adapter)) +			/* for BE3 */  			populate_be_v1_stats(adapter); +		else +			populate_be_v2_stats(adapter); -		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */ +		/* erx_v2 is longer than v0, v1. 
use v2 for v0, v1 access */  		for_all_rx_queues(adapter, rxo, i) {  			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];  			populate_erx_stats(adapter, rxo, erx_stat); @@ -512,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)  }  static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, -					struct rtnl_link_stats64 *stats) +						struct rtnl_link_stats64 *stats)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	struct be_drv_stats *drvs = &adapter->drv_stats; @@ -525,10 +589,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,  	for_all_rx_queues(adapter, rxo, i) {  		const struct be_rx_stats *rx_stats = rx_stats(rxo);  		do { -			start = u64_stats_fetch_begin_bh(&rx_stats->sync); +			start = u64_stats_fetch_begin_irq(&rx_stats->sync);  			pkts = rx_stats(rxo)->rx_pkts;  			bytes = rx_stats(rxo)->rx_bytes; -		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start)); +		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));  		stats->rx_packets += pkts;  		stats->rx_bytes += bytes;  		stats->multicast += rx_stats(rxo)->rx_mcast_pkts; @@ -539,10 +603,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,  	for_all_tx_queues(adapter, txo, i) {  		const struct be_tx_stats *tx_stats = tx_stats(txo);  		do { -			start = u64_stats_fetch_begin_bh(&tx_stats->sync); +			start = u64_stats_fetch_begin_irq(&tx_stats->sync);  			pkts = tx_stats(txo)->tx_pkts;  			bytes = tx_stats(txo)->tx_bytes; -		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start)); +		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));  		stats->tx_packets += pkts;  		stats->tx_bytes += bytes;  	} @@ -586,14 +650,15 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)  		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;  	} -	if ((link_status & LINK_STATUS_MASK) == LINK_UP) +	if (link_status)  		netif_carrier_on(netdev);  	else  		netif_carrier_off(netdev);  }  static void be_tx_stats_update(struct be_tx_obj *txo, -			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) +			       u32 wrb_cnt, u32 copied, u32 gso_segs, +			       bool stopped)  {  	struct be_tx_stats *stats = tx_stats(txo); @@ -609,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,  /* Determine number of WRB entries needed to xmit data in an skb */  static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, -								bool *dummy) +			   bool *dummy)  {  	int cnt = (skb->len > skb->data_len); @@ -637,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)  }  static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, -					struct sk_buff *skb) +				     struct sk_buff *skb)  {  	u8 vlan_prio;  	u16 vlan_tag; @@ -652,10 +717,24 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,  	return vlan_tag;  } +/* Used only for IP tunnel packets */ +static u16 skb_inner_ip_proto(struct sk_buff *skb) +{ +	return (inner_ip_hdr(skb)->version == 4) ? +		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr; +} + +static u16 skb_ip_proto(struct sk_buff *skb) +{ +	return (ip_hdr(skb)->version == 4) ? 
+		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; +} +  static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, -		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) +			 struct sk_buff *skb, u32 wrb_cnt, u32 len, +			 bool skip_hw_vlan)  { -	u16 vlan_tag; +	u16 vlan_tag, proto;  	memset(hdr, 0, sizeof(*hdr)); @@ -668,9 +747,15 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,  		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))  			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);  	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { -		if (is_tcp_pkt(skb)) +		if (skb->encapsulation) { +			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1); +			proto = skb_inner_ip_proto(skb); +		} else { +			proto = skb_ip_proto(skb); +		} +		if (proto == IPPROTO_TCP)  			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); -		else if (is_udp_pkt(skb)) +		else if (proto == IPPROTO_UDP)  			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);  	} @@ -688,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,  }  static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, -		bool unmap_single) +			  bool unmap_single)  {  	dma_addr_t dma; @@ -705,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,  }  static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, -		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, -		bool skip_hw_vlan) +			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, +			bool skip_hw_vlan)  {  	dma_addr_t busaddr;  	int i, copied = 0; @@ -735,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,  	}  	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { -		const struct skb_frag_struct *frag = -			&skb_shinfo(skb)->frags[i]; +		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];  		busaddr = skb_frag_dma_map(dev, frag, 0,  					   skb_frag_size(frag), DMA_TO_DEVICE);  		if (dma_mapping_error(dev, busaddr)) @@ -841,30 +925,19 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)  	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;  } -static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, -				struct sk_buff *skb) +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)  {  	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);  } -static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, -					   struct sk_buff *skb, -					   bool *skip_hw_vlan) +static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, +						  struct sk_buff *skb, +						  bool *skip_hw_vlan)  {  	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;  	unsigned int eth_hdr_len;  	struct iphdr *ip; -	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less -	 * may cause a transmit stall on that port. So the work-around is to -	 * pad such packets to a 36-byte length. -	 */ -	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { -		if (skb_padto(skb, 36)) -			goto tx_drop; -		skb->len = 36; -	} -  	/* For padded packets, BE HW modifies tot_len field in IP header  	 * incorrecly when VLAN tag is inserted by HW.  	 * For padded packets, Lancer computes incorrect checksum. 
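An editorial aside on the checksum-offload change above, not part of the patch: with VXLAN offload the L4 protocol that selects between the tcpcs and udpcs bits has to come from the inner headers whenever skb->encapsulation is set. A minimal standalone sketch of that selection, for illustration only (tx_l4_proto is a made-up name; the driver does the same thing via the skb_inner_ip_proto()/skb_ip_proto() helpers added above):

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

static u8 tx_l4_proto(const struct sk_buff *skb)
{
	/* For tunneled (VXLAN) packets the L4 header that matters is the
	 * inner one; plain packets use the outer header as before.
	 */
	if (skb->encapsulation)
		return inner_ip_hdr(skb)->version == 4 ?
		       inner_ip_hdr(skb)->protocol :
		       inner_ipv6_hdr(skb)->nexthdr;

	return ip_hdr(skb)->version == 4 ?
	       ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

wrb_fill_hdr() then sets tcpcs for TCP, udpcs for UDP, and additionally requests ipcs for encapsulated packets so the outer IP checksum is offloaded as well.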
@@ -879,11 +952,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,  	}  	/* If vlan tag is already inlined in the packet, skip HW VLAN -	 * tagging in UMC mode +	 * tagging in pvid-tagging mode  	 */ -	if ((adapter->function_mode & UMC_ENABLED) && +	if (be_pvid_tagging_enabled(adapter) &&  	    veh->h_vlan_proto == htons(ETH_P_8021Q)) -			*skip_hw_vlan = true; +		*skip_hw_vlan = true;  	/* HW has a bug wherein it will calculate CSUM for VLAN  	 * pkts even though it is disabled. @@ -893,7 +966,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,  	    vlan_tx_tag_present(skb)) {  		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);  		if (unlikely(!skb)) -			goto tx_drop; +			goto err;  	}  	/* HW may lockup when VLAN HW tagging is requested on @@ -915,15 +988,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,  	    be_vlan_tag_tx_chk(adapter, skb)) {  		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);  		if (unlikely(!skb)) -			goto tx_drop; +			goto err;  	}  	return skb;  tx_drop:  	dev_kfree_skb_any(skb); +err:  	return NULL;  } +static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, +					   struct sk_buff *skb, +					   bool *skip_hw_vlan) +{ +	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or +	 * less may cause a transmit stall on that port. So the work-around is +	 * to pad short packets (<= 32 bytes) to a 36-byte length. +	 */ +	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { +		if (skb_padto(skb, 36)) +			return NULL; +		skb->len = 36; +	} + +	if (BEx_chip(adapter) || lancer_chip(adapter)) { +		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); +		if (!skb) +			return NULL; +	} + +	return skb; +} +  static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)  {  	struct be_adapter *adapter = netdev_priv(netdev); @@ -935,8 +1032,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)  	u32 start = txq->head;  	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); -	if (!skb) +	if (!skb) { +		tx_stats(txo)->tx_drv_drops++;  		return NETDEV_TX_OK; +	}  	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb); @@ -965,6 +1064,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)  		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);  	} else {  		txq->head = start; +		tx_stats(txo)->tx_drv_drops++;  		dev_kfree_skb_any(skb);  	}  	return NETDEV_TX_OK; @@ -974,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	if (new_mtu < BE_MIN_MTU || -			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - -					(ETH_HLEN + ETH_FCS_LEN))) { +	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {  		dev_info(&adapter->pdev->dev, -			"MTU must be between %d and %d bytes\n", -			BE_MIN_MTU, -			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); +			 "MTU must be between %d and %d bytes\n", +			 BE_MIN_MTU, +			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));  		return -EINVAL;  	}  	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", -			netdev->mtu, new_mtu); +		 netdev->mtu, new_mtu);  	netdev->mtu = new_mtu;  	return 0;  } @@ -995,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)  static int be_vid_config(struct be_adapter *adapter)  {  	u16 vids[BE_NUM_VLANS_SUPPORTED]; -	u16 num = 0, i; +	u16 num = 0, i = 0;  	int status = 0;  	/* No need to 
further configure vids if in promiscuous mode */ @@ -1006,25 +1105,43 @@ static int be_vid_config(struct be_adapter *adapter)  		goto set_vlan_promisc;  	/* Construct VLAN Table to give to HW */ -	for (i = 0; i < VLAN_N_VID; i++) -		if (adapter->vlan_tag[i]) -			vids[num++] = cpu_to_le16(i); - -	status = be_cmd_vlan_config(adapter, adapter->if_handle, -				    vids, num, 1, 0); +	for_each_set_bit(i, adapter->vids, VLAN_N_VID) +		vids[num++] = cpu_to_le16(i); -	/* Set to VLAN promisc mode as setting VLAN filter failed */ +	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);  	if (status) { -		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n"); -		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n"); -		goto set_vlan_promisc; +		/* Set to VLAN promisc mode as setting VLAN filter failed */ +		if (addl_status(status) == +				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) +			goto set_vlan_promisc; +		dev_err(&adapter->pdev->dev, +			"Setting HW VLAN filtering failed.\n"); +	} else { +		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) { +			/* hw VLAN filtering re-enabled. */ +			status = be_cmd_rx_filter(adapter, +						  BE_FLAGS_VLAN_PROMISC, OFF); +			if (!status) { +				dev_info(&adapter->pdev->dev, +					 "Disabling VLAN Promiscuous mode.\n"); +				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; +			} +		}  	}  	return status;  set_vlan_promisc: -	status = be_cmd_vlan_config(adapter, adapter->if_handle, -				    NULL, 0, 1, 1); +	if (adapter->flags & BE_FLAGS_VLAN_PROMISC) +		return 0; + +	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); +	if (!status) { +		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n"); +		adapter->flags |= BE_FLAGS_VLAN_PROMISC; +	} else +		dev_err(&adapter->pdev->dev, +			"Failed to enable VLAN Promiscuous mode.\n");  	return status;  } @@ -1033,24 +1150,22 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)  	struct be_adapter *adapter = netdev_priv(netdev);  	int status = 0; -	if (!lancer_chip(adapter) && !be_physfn(adapter)) { -		status = -EINVAL; -		goto ret; -	} -  	/* Packets with VID 0 are always received by Lancer by default */  	if (lancer_chip(adapter) && vid == 0) -		goto ret; +		return status; -	adapter->vlan_tag[vid] = 1; -	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1)) -		status = be_vid_config(adapter); +	if (test_bit(vid, adapter->vids)) +		return status; + +	set_bit(vid, adapter->vids); +	adapter->vlans_added++; + +	status = be_vid_config(adapter); +	if (status) { +		adapter->vlans_added--; +		clear_bit(vid, adapter->vids); +	} -	if (!status) -		adapter->vlans_added++; -	else -		adapter->vlan_tag[vid] = 0; -ret:  	return status;  } @@ -1059,27 +1174,28 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)  	struct be_adapter *adapter = netdev_priv(netdev);  	int status = 0; -	if (!lancer_chip(adapter) && !be_physfn(adapter)) { -		status = -EINVAL; -		goto ret; -	} -  	/* Packets with VID 0 are always received by Lancer by default */  	if (lancer_chip(adapter) && vid == 0)  		goto ret; -	adapter->vlan_tag[vid] = 0; -	if (adapter->vlans_added <= be_max_vlans(adapter)) -		status = be_vid_config(adapter); - +	clear_bit(vid, adapter->vids); +	status = be_vid_config(adapter);  	if (!status)  		adapter->vlans_added--;  	else -		adapter->vlan_tag[vid] = 1; +		set_bit(vid, adapter->vids);  ret:  	return status;  } +static void be_clear_promisc(struct be_adapter *adapter) +{ +	adapter->promiscuous = false; +	adapter->flags &= 
~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC); + +	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); +} +  static void be_set_rx_mode(struct net_device *netdev)  {  	struct be_adapter *adapter = netdev_priv(netdev); @@ -1093,19 +1209,15 @@ static void be_set_rx_mode(struct net_device *netdev)  	/* BE was previously in promiscuous mode; disable it */  	if (adapter->promiscuous) { -		adapter->promiscuous = false; -		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); - +		be_clear_promisc(adapter);  		if (adapter->vlans_added)  			be_vid_config(adapter);  	}  	/* Enable multicast promisc if num configured exceeds what we support */  	if (netdev->flags & IFF_ALLMULTI || -	    netdev_mc_count(netdev) > be_max_mc(adapter)) { -		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); -		goto done; -	} +	    netdev_mc_count(netdev) > be_max_mc(adapter)) +		goto set_mcast_promisc;  	if (netdev_uc_count(netdev) != adapter->uc_macs) {  		struct netdev_hw_addr *ha; @@ -1131,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)  	}  	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); - -	/* Set to MCAST promisc mode if setting MULTICAST address fails */ -	if (status) { -		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n"); -		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n"); -		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); +	if (!status) { +		if (adapter->flags & BE_FLAGS_MCAST_PROMISC) +			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC; +		goto done;  	} + +set_mcast_promisc: +	if (adapter->flags & BE_FLAGS_MCAST_PROMISC) +		return; + +	/* Set to MCAST promisc mode if setting MULTICAST address fails +	 * or if num configured exceeds what we support +	 */ +	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); +	if (!status) +		adapter->flags |= BE_FLAGS_MCAST_PROMISC;  done:  	return;  } @@ -1167,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)  	if (status)  		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", -				mac, vf); +			mac, vf);  	else  		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); @@ -1175,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)  }  static int be_get_vf_config(struct net_device *netdev, int vf, -			struct ifla_vf_info *vi) +			    struct ifla_vf_info *vi)  {  	struct be_adapter *adapter = netdev_priv(netdev);  	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; @@ -1187,54 +1308,55 @@ static int be_get_vf_config(struct net_device *netdev, int vf,  		return -EINVAL;  	vi->vf = vf; -	vi->tx_rate = vf_cfg->tx_rate; -	vi->vlan = vf_cfg->vlan_tag; -	vi->qos = 0; +	vi->max_tx_rate = vf_cfg->tx_rate; +	vi->min_tx_rate = 0; +	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; +	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;  	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); +	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;  	return 0;  } -static int be_set_vf_vlan(struct net_device *netdev, -			int vf, u16 vlan, u8 qos) +static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)  {  	struct be_adapter *adapter = netdev_priv(netdev); +	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];  	int status = 0;  	if (!sriov_enabled(adapter))  		return -EPERM; -	if (vf >= adapter->num_vfs || vlan > 4095) +	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)  		return -EINVAL; -	if (vlan) { -		if (adapter->vf_cfg[vf].vlan_tag != vlan) { -			/* If this is new value, program it. Else skip. 
*/ -			adapter->vf_cfg[vf].vlan_tag = vlan; - -			status = be_cmd_set_hsw_config(adapter, vlan, -				vf + 1, adapter->vf_cfg[vf].if_handle, 0); -		} +	if (vlan || qos) { +		vlan |= qos << VLAN_PRIO_SHIFT; +		if (vf_cfg->vlan_tag != vlan) +			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, +						       vf_cfg->if_handle, 0);  	} else {  		/* Reset Transparent Vlan Tagging. */ -		adapter->vf_cfg[vf].vlan_tag = 0; -		vlan = adapter->vf_cfg[vf].def_vid; -		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, -			adapter->vf_cfg[vf].if_handle, 0); +		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, +					       vf + 1, vf_cfg->if_handle, 0);  	} - -	if (status) +	if (!status) +		vf_cfg->vlan_tag = vlan; +	else  		dev_info(&adapter->pdev->dev, -				"VLAN %d config on VF %d failed\n", vlan, vf); +			 "VLAN %d config on VF %d failed\n", vlan, vf);  	return status;  } -static int be_set_vf_tx_rate(struct net_device *netdev, -			int vf, int rate) +static int be_set_vf_tx_rate(struct net_device *netdev, int vf, +			     int min_tx_rate, int max_tx_rate)  {  	struct be_adapter *adapter = netdev_priv(netdev); -	int status = 0; +	struct device *dev = &adapter->pdev->dev; +	int percent_rate, status = 0; +	u16 link_speed = 0; +	u8 link_status;  	if (!sriov_enabled(adapter))  		return -EPERM; @@ -1242,76 +1364,148 @@ static int be_set_vf_tx_rate(struct net_device *netdev,  	if (vf >= adapter->num_vfs)  		return -EINVAL; -	if (rate < 100 || rate > 10000) { -		dev_err(&adapter->pdev->dev, -			"tx rate must be between 100 and 10000 Mbps\n"); +	if (min_tx_rate)  		return -EINVAL; + +	if (!max_tx_rate) +		goto config_qos; + +	status = be_cmd_link_status_query(adapter, &link_speed, +					  &link_status, 0); +	if (status) +		goto err; + +	if (!link_status) { +		dev_err(dev, "TX-rate setting not allowed when link is down\n"); +		status = -EPERM; +		goto err;  	} -	if (lancer_chip(adapter)) -		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1); -	else -		status = be_cmd_set_qos(adapter, rate / 10, vf + 1); +	if (max_tx_rate < 100 || max_tx_rate > link_speed) { +		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n", +			link_speed); +		status = -EINVAL; +		goto err; +	} + +	/* On Skyhawk the QOS setting must be done only as a % value */ +	percent_rate = link_speed / 100; +	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) { +		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n", +			percent_rate); +		status = -EINVAL; +		goto err; +	} +config_qos: +	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);  	if (status) -		dev_err(&adapter->pdev->dev, -				"tx rate %d on VF %d failed\n", rate, vf); -	else -		adapter->vf_cfg[vf].tx_rate = rate; +		goto err; + +	adapter->vf_cfg[vf].tx_rate = max_tx_rate; +	return 0; + +err: +	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n", +		max_tx_rate, vf);  	return status;  } +static int be_set_vf_link_state(struct net_device *netdev, int vf, +				int link_state) +{ +	struct be_adapter *adapter = netdev_priv(netdev); +	int status; -static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo) +	if (!sriov_enabled(adapter)) +		return -EPERM; + +	if (vf >= adapter->num_vfs) +		return -EINVAL; + +	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1); +	if (!status) +		adapter->vf_cfg[vf].plink_tracking = link_state; + +	return status; +} + +static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts, +			  ulong now)  { -	struct be_rx_stats *stats = 
rx_stats(&adapter->rx_obj[eqo->idx]); -	ulong now = jiffies; -	ulong delta = now - stats->rx_jiffies; -	u64 pkts; -	unsigned int start, eqd; +	aic->rx_pkts_prev = rx_pkts; +	aic->tx_reqs_prev = tx_pkts; +	aic->jiffies = now; +} -	if (!eqo->enable_aic) { -		eqd = eqo->eqd; -		goto modify_eqd; -	} +static void be_eqd_update(struct be_adapter *adapter) +{ +	struct be_set_eqd set_eqd[MAX_EVT_QS]; +	int eqd, i, num = 0, start; +	struct be_aic_obj *aic; +	struct be_eq_obj *eqo; +	struct be_rx_obj *rxo; +	struct be_tx_obj *txo; +	u64 rx_pkts, tx_pkts; +	ulong now; +	u32 pps, delta; -	if (eqo->idx >= adapter->num_rx_qs) -		return; +	for_all_evt_queues(adapter, eqo, i) { +		aic = &adapter->aic_obj[eqo->idx]; +		if (!aic->enable) { +			if (aic->jiffies) +				aic->jiffies = 0; +			eqd = aic->et_eqd; +			goto modify_eqd; +		} -	stats = rx_stats(&adapter->rx_obj[eqo->idx]); +		rxo = &adapter->rx_obj[eqo->idx]; +		do { +			start = u64_stats_fetch_begin_irq(&rxo->stats.sync); +			rx_pkts = rxo->stats.rx_pkts; +		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); -	/* Wrapped around */ -	if (time_before(now, stats->rx_jiffies)) { -		stats->rx_jiffies = now; -		return; -	} +		txo = &adapter->tx_obj[eqo->idx]; +		do { +			start = u64_stats_fetch_begin_irq(&txo->stats.sync); +			tx_pkts = txo->stats.tx_reqs; +		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); -	/* Update once a second */ -	if (delta < HZ) -		return; -	do { -		start = u64_stats_fetch_begin_bh(&stats->sync); -		pkts = stats->rx_pkts; -	} while (u64_stats_fetch_retry_bh(&stats->sync, start)); - -	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ); -	stats->rx_pkts_prev = pkts; -	stats->rx_jiffies = now; -	eqd = (stats->rx_pps / 110000) << 3; -	eqd = min(eqd, eqo->max_eqd); -	eqd = max(eqd, eqo->min_eqd); -	if (eqd < 10) -		eqd = 0; +		/* Skip, if wrapped around or first calculation */ +		now = jiffies; +		if (!aic->jiffies || time_before(now, aic->jiffies) || +		    rx_pkts < aic->rx_pkts_prev || +		    tx_pkts < aic->tx_reqs_prev) { +			be_aic_update(aic, rx_pkts, tx_pkts, now); +			continue; +		} + +		delta = jiffies_to_msecs(now - aic->jiffies); +		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) + +			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta); +		eqd = (pps / 15000) << 2; +		if (eqd < 8) +			eqd = 0; +		eqd = min_t(u32, eqd, aic->max_eqd); +		eqd = max_t(u32, eqd, aic->min_eqd); + +		be_aic_update(aic, rx_pkts, tx_pkts, now);  modify_eqd: -	if (eqd != eqo->cur_eqd) { -		be_cmd_modify_eqd(adapter, eqo->q.id, eqd); -		eqo->cur_eqd = eqd; +		if (eqd != aic->prev_eqd) { +			set_eqd[num].delay_multiplier = (eqd * 65)/100; +			set_eqd[num].eq_id = eqo->q.id; +			aic->prev_eqd = eqd; +			num++; +		}  	} + +	if (num) +		be_cmd_modify_eqd(adapter, set_eqd, num);  }  static void be_rx_stats_update(struct be_rx_obj *rxo, -		struct be_rx_compl_info *rxcp) +			       struct be_rx_compl_info *rxcp)  {  	struct be_rx_stats *stats = rx_stats(rxo); @@ -1329,28 +1523,34 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,  static inline bool csum_passed(struct be_rx_compl_info *rxcp)  {  	/* L4 checksum is not reliable for non TCP/UDP packets. 
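A note on the reworked be_eqd_update() above, not part of the patch: the adaptive algorithm now sums the RX and TX packet rates of an event queue and maps the combined pps to an EQ delay. A minimal sketch of that mapping, for illustration only (example_eqd_usecs is a made-up name; the driver computes this inline per EQ):

#include <linux/kernel.h>

static u32 example_eqd_usecs(u32 pps, u32 min_eqd, u32 max_eqd)
{
	u32 eqd = (pps / 15000) << 2;	/* ~4us more delay per 15K pkts/s */

	if (eqd < 8)
		eqd = 0;	/* low rates: no coalescing, subject to the floor below */

	eqd = min_t(u32, eqd, max_eqd);	/* user-set ceiling (rx-usecs-high) */
	return max_t(u32, eqd, min_eqd);	/* user-set floor (rx-usecs-low) */
}

The chosen delay is then converted to the firmware's delay_multiplier as eqd * 65 / 100, and all EQs whose delay changed are updated in one batched be_cmd_modify_eqd() call.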
-	 * Also ignore ipcksm for ipv6 pkts */ +	 * Also ignore ipcksm for ipv6 pkts +	 */  	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum && -				(rxcp->ip_csum || rxcp->ipv6); +		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;  } -static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo, -						u16 frag_idx) +static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)  {  	struct be_adapter *adapter = rxo->adapter;  	struct be_rx_page_info *rx_page_info;  	struct be_queue_info *rxq = &rxo->q; +	u16 frag_idx = rxq->tail;  	rx_page_info = &rxo->page_info_tbl[frag_idx];  	BUG_ON(!rx_page_info->page); -	if (rx_page_info->last_page_user) { +	if (rx_page_info->last_frag) {  		dma_unmap_page(&adapter->pdev->dev,  			       dma_unmap_addr(rx_page_info, bus),  			       adapter->big_page_size, DMA_FROM_DEVICE); -		rx_page_info->last_page_user = false; +		rx_page_info->last_frag = false; +	} else { +		dma_sync_single_for_cpu(&adapter->pdev->dev, +					dma_unmap_addr(rx_page_info, bus), +					rx_frag_size, DMA_FROM_DEVICE);  	} +	queue_tail_inc(rxq);  	atomic_dec(&rxq->used);  	return rx_page_info;  } @@ -1359,15 +1559,13 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,  static void be_rx_compl_discard(struct be_rx_obj *rxo,  				struct be_rx_compl_info *rxcp)  { -	struct be_queue_info *rxq = &rxo->q;  	struct be_rx_page_info *page_info;  	u16 i, num_rcvd = rxcp->num_rcvd;  	for (i = 0; i < num_rcvd; i++) { -		page_info = get_rx_page_info(rxo, rxcp->rxq_idx); +		page_info = get_rx_page_info(rxo);  		put_page(page_info->page);  		memset(page_info, 0, sizeof(*page_info)); -		index_inc(&rxcp->rxq_idx, rxq->len);  	}  } @@ -1378,13 +1576,12 @@ static void be_rx_compl_discard(struct be_rx_obj *rxo,  static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,  			     struct be_rx_compl_info *rxcp)  { -	struct be_queue_info *rxq = &rxo->q;  	struct be_rx_page_info *page_info;  	u16 i, j;  	u16 hdr_len, curr_frag_len, remaining;  	u8 *start; -	page_info = get_rx_page_info(rxo, rxcp->rxq_idx); +	page_info = get_rx_page_info(rxo);  	start = page_address(page_info->page) + page_info->page_offset;  	prefetch(start); @@ -1405,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,  		skb_frag_set_page(skb, 0, page_info->page);  		skb_shinfo(skb)->frags[0].page_offset =  					page_info->page_offset + hdr_len; -		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); +		skb_frag_size_set(&skb_shinfo(skb)->frags[0], +				  curr_frag_len - hdr_len);  		skb->data_len = curr_frag_len - hdr_len;  		skb->truesize += rx_frag_size;  		skb->tail += hdr_len; @@ -1418,10 +1616,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,  	}  	/* More frags present for this completion */ -	index_inc(&rxcp->rxq_idx, rxq->len);  	remaining = rxcp->pkt_size - curr_frag_len;  	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { -		page_info = get_rx_page_info(rxo, rxcp->rxq_idx); +		page_info = get_rx_page_info(rxo);  		curr_frag_len = min(remaining, rx_frag_size);  		/* Coalesce all frags from the same physical page in one slot */ @@ -1442,14 +1639,13 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,  		skb->data_len += curr_frag_len;  		skb->truesize += rx_frag_size;  		remaining -= curr_frag_len; -		index_inc(&rxcp->rxq_idx, rxq->len);  		page_info->page = NULL;  	}  	BUG_ON(j > MAX_SKB_FRAGS);  }  /* Process the RX completion indicated by rxcp when GRO is disabled */ -static void 
be_rx_compl_process(struct be_rx_obj *rxo, +static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,  				struct be_rx_compl_info *rxcp)  {  	struct be_adapter *adapter = rxo->adapter; @@ -1473,8 +1669,10 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,  	skb->protocol = eth_type_trans(skb, netdev);  	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);  	if (netdev->features & NETIF_F_RXHASH) -		skb->rxhash = rxcp->rss_hash; +		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); +	skb->encapsulation = rxcp->tunneled; +	skb_mark_napi_id(skb, napi);  	if (rxcp->vlanf)  		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); @@ -1490,7 +1688,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,  	struct be_adapter *adapter = rxo->adapter;  	struct be_rx_page_info *page_info;  	struct sk_buff *skb = NULL; -	struct be_queue_info *rxq = &rxo->q;  	u16 remaining, curr_frag_len;  	u16 i, j; @@ -1502,7 +1699,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,  	remaining = rxcp->pkt_size;  	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { -		page_info = get_rx_page_info(rxo, rxcp->rxq_idx); +		page_info = get_rx_page_info(rxo);  		curr_frag_len = min(remaining, rx_frag_size); @@ -1520,7 +1717,6 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,  		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);  		skb->truesize += rx_frag_size;  		remaining -= curr_frag_len; -		index_inc(&rxcp->rxq_idx, rxq->len);  		memset(page_info, 0, sizeof(*page_info));  	}  	BUG_ON(j > MAX_SKB_FRAGS); @@ -1531,7 +1727,10 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,  	skb->ip_summed = CHECKSUM_UNNECESSARY;  	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);  	if (adapter->netdev->features & NETIF_F_RXHASH) -		skb->rxhash = rxcp->rss_hash; +		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); + +	skb->encapsulation = rxcp->tunneled; +	skb_mark_napi_id(skb, napi);  	if (rxcp->vlanf)  		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); @@ -1554,8 +1753,6 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,  		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);  	rxcp->ipv6 =  		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); -	rxcp->rxq_idx = -		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);  	rxcp->num_rcvd =  		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);  	rxcp->pkt_type = @@ -1563,12 +1760,14 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,  	rxcp->rss_hash =  		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);  	if (rxcp->vlanf) { -		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, +		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,  					  compl); -		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, -					       compl); +		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, +					       vlan_tag, compl);  	}  	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); +	rxcp->tunneled = +		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);  }  static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, @@ -1586,8 +1785,6 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,  		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);  	rxcp->ipv6 =  		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); -	rxcp->rxq_idx = -		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);  	rxcp->num_rcvd =  		
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);  	rxcp->pkt_type = @@ -1595,10 +1792,10 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,  	rxcp->rss_hash =  		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);  	if (rxcp->vlanf) { -		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, +		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,  					  compl); -		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, -					       compl); +		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, +					       vlan_tag, compl);  	}  	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);  	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, @@ -1628,16 +1825,18 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)  		rxcp->l4_csum = 0;  	if (rxcp->vlanf) { -		/* vlanf could be wrongly set in some cards. -		 * ignore if vtm is not set */ -		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm) +		/* In QNQ modes, if qnq bit is not set, then the packet was +		 * tagged only with the transparent outer vlan-tag and must +		 * not be treated as a vlan packet by host +		 */ +		if (be_is_qnq_mode(adapter) && !rxcp->qnq)  			rxcp->vlanf = 0;  		if (!lancer_chip(adapter))  			rxcp->vlan_tag = swab16(rxcp->vlan_tag);  		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && -		    !adapter->vlan_tag[rxcp->vlan_tag]) +		    !test_bit(rxcp->vlan_tag, adapter->vids))  			rxcp->vlanf = 0;  	} @@ -1667,6 +1866,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)  	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;  	struct be_queue_info *rxq = &rxo->q;  	struct page *pagep = NULL; +	struct device *dev = &adapter->pdev->dev;  	struct be_eth_rx_d *rxd;  	u64 page_dmaaddr = 0, frag_dmaaddr;  	u32 posted, page_offset = 0; @@ -1679,20 +1879,25 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)  				rx_stats(rxo)->rx_post_fail++;  				break;  			} -			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep, -						    0, adapter->big_page_size, +			page_dmaaddr = dma_map_page(dev, pagep, 0, +						    adapter->big_page_size,  						    DMA_FROM_DEVICE); -			page_info->page_offset = 0; +			if (dma_mapping_error(dev, page_dmaaddr)) { +				put_page(pagep); +				pagep = NULL; +				rx_stats(rxo)->rx_post_fail++; +				break; +			} +			page_offset = 0;  		} else {  			get_page(pagep); -			page_info->page_offset = page_offset + rx_frag_size; +			page_offset += rx_frag_size;  		} -		page_offset = page_info->page_offset; +		page_info->page_offset = page_offset;  		page_info->page = pagep; -		dma_unmap_addr_set(page_info, bus, page_dmaaddr); -		frag_dmaaddr = page_dmaaddr + page_info->page_offset;  		rxd = queue_head_node(rxq); +		frag_dmaaddr = page_dmaaddr + page_info->page_offset;  		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);  		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); @@ -1700,18 +1905,29 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)  		if ((page_offset + rx_frag_size + rx_frag_size) >  					adapter->big_page_size) {  			pagep = NULL; -			page_info->last_page_user = true; +			page_info->last_frag = true; +			dma_unmap_addr_set(page_info, bus, page_dmaaddr); +		} else { +			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);  		}  		prev_page_info = page_info;  		queue_head_inc(rxq);  		page_info = &rxo->page_info_tbl[rxq->head];  	} -	if (pagep) -		prev_page_info->last_page_user = true; + +	/* Mark the last frag 
of a page when we break out of the above loop +	 * with no more slots available in the RXQ +	 */ +	if (pagep) { +		prev_page_info->last_frag = true; +		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr); +	}  	if (posted) {  		atomic_add(posted, &rxq->used); +		if (rxo->rx_post_starved) +			rxo->rx_post_starved = false;  		be_rxq_notify(adapter, rxq->id, posted);  	} else if (atomic_read(&rxq->used) == 0) {  		/* Let be_worker replenish when memory is available */ @@ -1736,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)  }  static u16 be_tx_compl_process(struct be_adapter *adapter, -		struct be_tx_obj *txo, u16 last_index) +			       struct be_tx_obj *txo, u16 last_index)  {  	struct be_queue_info *txq = &txo->q;  	struct be_eth_wrb *wrb; @@ -1763,7 +1979,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,  		queue_tail_inc(txq);  	} while (cur_index != last_index); -	kfree_skb(sent_skb); +	dev_kfree_skb_any(sent_skb);  	return num_wrbs;  } @@ -1803,7 +2019,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)  	struct be_rx_compl_info *rxcp;  	struct be_adapter *adapter = rxo->adapter;  	int flush_wait = 0; -	u16 tail;  	/* Consume pending rx completions.  	 * Wait for the flush completion (identified by zero num_rcvd) @@ -1836,9 +2051,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)  	be_cq_notify(adapter, rx_cq->id, false, 0);  	/* Then free posted rx buffers that were not used */ -	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; -	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { -		page_info = get_rx_page_info(rxo, tail); +	while (atomic_read(&rxq->used) > 0) { +		page_info = get_rx_page_info(rxo);  		put_page(page_info->page);  		memset(page_info, 0, sizeof(*page_info));  	} @@ -1856,11 +2070,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)  	bool dummy_wrb;  	int i, pending_txqs; -	/* Wait for a max of 200ms for all the tx-completions to arrive. 
*/ +	/* Stop polling for compls when HW has been silent for 10ms */  	do {  		pending_txqs = adapter->num_tx_qs;  		for_all_tx_queues(adapter, txo, i) { +			cmpl = 0; +			num_wrbs = 0;  			txq = &txo->q;  			while ((txcp = be_tx_compl_get(&txo->cq))) {  				end_idx = @@ -1873,14 +2089,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)  			if (cmpl) {  				be_cq_notify(adapter, txo->cq.id, false, cmpl);  				atomic_sub(num_wrbs, &txq->used); -				cmpl = 0; -				num_wrbs = 0; +				timeo = 0;  			}  			if (atomic_read(&txq->used) == 0)  				pending_txqs--;  		} -		if (pending_txqs == 0 || ++timeo > 200) +		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))  			break;  		mdelay(1); @@ -1914,6 +2129,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)  		if (eqo->q.created) {  			be_eq_clean(eqo);  			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); +			napi_hash_del(&eqo->napi);  			netif_napi_del(&eqo->napi);  		}  		be_queue_free(adapter, &eqo->q); @@ -1924,6 +2140,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)  {  	struct be_queue_info *eq;  	struct be_eq_obj *eqo; +	struct be_aic_obj *aic;  	int i, rc;  	adapter->num_evt_qs = min_t(u16, num_irqs(adapter), @@ -1932,15 +2149,17 @@ static int be_evt_queues_create(struct be_adapter *adapter)  	for_all_evt_queues(adapter, eqo, i) {  		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,  			       BE_NAPI_WEIGHT); +		napi_hash_add(&eqo->napi); +		aic = &adapter->aic_obj[i];  		eqo->adapter = adapter;  		eqo->tx_budget = BE_TX_BUDGET;  		eqo->idx = i; -		eqo->max_eqd = BE_MAX_EQD; -		eqo->enable_aic = true; +		aic->max_eqd = BE_MAX_EQD; +		aic->enable = true;  		eq = &eqo->q;  		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, -					sizeof(struct be_eq_entry)); +				    sizeof(struct be_eq_entry));  		if (rc)  			return rc; @@ -1973,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)  	cq = &adapter->mcc_obj.cq;  	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, -			sizeof(struct be_mcc_compl))) +			   sizeof(struct be_mcc_compl)))  		goto err;  	/* Use the default EQ for MCC completions */ @@ -2033,6 +2252,9 @@ static int be_tx_qs_create(struct be_adapter *adapter)  		if (status)  			return status; +		u64_stats_init(&txo->stats.sync); +		u64_stats_init(&txo->stats.sync_compl); +  		/* If num_evt_qs is less than num_tx_qs, then more than  		 * one txq share an eq  		 */ @@ -2090,10 +2312,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter)  		rxo->adapter = adapter;  		cq = &rxo->cq;  		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, -				sizeof(struct be_eth_rx_compl)); +				    sizeof(struct be_eth_rx_compl));  		if (rc)  			return rc; +		u64_stats_init(&rxo->stats.sync);  		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;  		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);  		if (rc) @@ -2153,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)  }  static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, -			int budget) +			 int budget, int polling)  {  	struct be_adapter *adapter = rxo->adapter;  	struct be_queue_info *rx_cq = &rxo->cq; @@ -2179,15 +2402,17 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,  		 * promiscuous mode on some skews  		 */  		if (unlikely(rxcp->port != adapter->port_num && -				!lancer_chip(adapter))) { +			     !lancer_chip(adapter))) {  			be_rx_compl_discard(rxo, rxcp);  			goto loop_continue;  		} -		if (do_gro(rxcp)) +		/* Don't do gro when we're busy_polling */ +		if 
(do_gro(rxcp) && polling != BUSY_POLLING)  			be_rx_compl_process_gro(rxo, napi, rxcp);  		else -			be_rx_compl_process(rxo, rxcp); +			be_rx_compl_process(rxo, napi, rxcp); +  loop_continue:  		be_rx_stats_update(rxo, rxcp);  	} @@ -2195,7 +2420,11 @@ loop_continue:  	if (work_done) {  		be_cq_notify(adapter, rx_cq->id, true, work_done); -		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) +		/* When an rx-obj gets into post_starved state, just +		 * let be_worker do the posting. +		 */ +		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && +		    !rxo->rx_post_starved)  			be_post_rx_frags(rxo, GFP_ATOMIC);  	} @@ -2213,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,  		if (!txcp)  			break;  		num_wrbs += be_tx_compl_process(adapter, txo, -				AMAP_GET_BITS(struct amap_eth_tx_compl, -					wrb_index, txcp)); +						AMAP_GET_BITS(struct +							      amap_eth_tx_compl, +							      wrb_index, txcp));  	}  	if (work_done) { @@ -2224,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,  		/* As Tx wrbs have been freed up, wake up netdev queue  		 * if it was stopped due to lack of tx wrbs.  */  		if (__netif_subqueue_stopped(adapter->netdev, idx) && -			atomic_read(&txo->q.used) < txo->q.len / 2) { +		    atomic_read(&txo->q.used) < txo->q.len / 2) {  			netif_wake_subqueue(adapter->netdev, idx);  		} @@ -2240,6 +2470,7 @@ int be_poll(struct napi_struct *napi, int budget)  	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);  	struct be_adapter *adapter = eqo->adapter;  	int max_work = 0, work, i, num_evts; +	struct be_rx_obj *rxo;  	bool tx_done;  	num_evts = events_get(eqo); @@ -2252,13 +2483,18 @@ int be_poll(struct napi_struct *napi, int budget)  			max_work = budget;  	} -	/* This loop will iterate twice for EQ0 in which -	 * completions of the last RXQ (default one) are also processed -	 * For other EQs the loop iterates only once -	 */ -	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) { -		work = be_process_rx(&adapter->rx_obj[i], napi, budget); -		max_work = max(work, max_work); +	if (be_lock_napi(eqo)) { +		/* This loop will iterate twice for EQ0 in which +		 * completions of the last RXQ (default one) are also processed +		 * For other EQs the loop iterates only once +		 */ +		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { +			work = be_process_rx(rxo, napi, budget, NAPI_POLLING); +			max_work = max(work, max_work); +		} +		be_unlock_napi(eqo); +	} else { +		max_work = budget;  	}  	if (is_mcc_eqo(eqo)) @@ -2274,11 +2510,36 @@ int be_poll(struct napi_struct *napi, int budget)  	return max_work;  } +#ifdef CONFIG_NET_RX_BUSY_POLL +static int be_busy_poll(struct napi_struct *napi) +{ +	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); +	struct be_adapter *adapter = eqo->adapter; +	struct be_rx_obj *rxo; +	int i, work = 0; + +	if (!be_lock_busy_poll(eqo)) +		return LL_FLUSH_BUSY; + +	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { +		work = be_process_rx(rxo, napi, 4, BUSY_POLLING); +		if (work) +			break; +	} + +	be_unlock_busy_poll(eqo); +	return work; +} +#endif +  void be_detect_error(struct be_adapter *adapter)  {  	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;  	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;  	u32 i; +	bool error_detected = false; +	struct device *dev = &adapter->pdev->dev; +	struct net_device *netdev = adapter->netdev;  	if (be_hw_error(adapter))  		return; @@ -2287,59 +2548,64 @@ void 
be_detect_error(struct be_adapter *adapter)  		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);  		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {  			sliport_err1 = ioread32(adapter->db + -					SLIPORT_ERROR1_OFFSET); +						SLIPORT_ERROR1_OFFSET);  			sliport_err2 = ioread32(adapter->db + -					SLIPORT_ERROR2_OFFSET); +						SLIPORT_ERROR2_OFFSET); +			adapter->hw_error = true; +			/* Do not log error messages if its a FW reset */ +			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && +			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) { +				dev_info(dev, "Firmware update in progress\n"); +			} else { +				error_detected = true; +				dev_err(dev, "Error detected in the card\n"); +				dev_err(dev, "ERR: sliport status 0x%x\n", +					sliport_status); +				dev_err(dev, "ERR: sliport error1 0x%x\n", +					sliport_err1); +				dev_err(dev, "ERR: sliport error2 0x%x\n", +					sliport_err2); +			}  		}  	} else {  		pci_read_config_dword(adapter->pdev, -				PCICFG_UE_STATUS_LOW, &ue_lo); +				      PCICFG_UE_STATUS_LOW, &ue_lo);  		pci_read_config_dword(adapter->pdev, -				PCICFG_UE_STATUS_HIGH, &ue_hi); +				      PCICFG_UE_STATUS_HIGH, &ue_hi);  		pci_read_config_dword(adapter->pdev, -				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); +				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);  		pci_read_config_dword(adapter->pdev, -				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); +				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);  		ue_lo = (ue_lo & ~ue_lo_mask);  		ue_hi = (ue_hi & ~ue_hi_mask); -	} - -	/* On certain platforms BE hardware can indicate spurious UEs. -	 * Allow the h/w to stop working completely in case of a real UE. -	 * Hence not setting the hw_error for UE detection. -	 */ -	if (sliport_status & SLIPORT_STATUS_ERR_MASK) { -		adapter->hw_error = true; -		dev_err(&adapter->pdev->dev, -			"Error detected in the card\n"); -	} - -	if (sliport_status & SLIPORT_STATUS_ERR_MASK) { -		dev_err(&adapter->pdev->dev, -			"ERR: sliport status 0x%x\n", sliport_status); -		dev_err(&adapter->pdev->dev, -			"ERR: sliport error1 0x%x\n", sliport_err1); -		dev_err(&adapter->pdev->dev, -			"ERR: sliport error2 0x%x\n", sliport_err2); -	} -	if (ue_lo) { -		for (i = 0; ue_lo; ue_lo >>= 1, i++) { -			if (ue_lo & 1) -				dev_err(&adapter->pdev->dev, -				"UE: %s bit set\n", ue_status_low_desc[i]); -		} -	} +		/* On certain platforms BE hardware can indicate spurious UEs. +		 * Allow HW to stop working completely in case of a real UE. +		 * Hence not setting the hw_error for UE detection. 
+		 */ -	if (ue_hi) { -		for (i = 0; ue_hi; ue_hi >>= 1, i++) { -			if (ue_hi & 1) -				dev_err(&adapter->pdev->dev, -				"UE: %s bit set\n", ue_status_hi_desc[i]); +		if (ue_lo || ue_hi) { +			error_detected = true; +			dev_err(dev, +				"Unrecoverable Error detected in the adapter"); +			dev_err(dev, "Please reboot server to recover"); +			if (skyhawk_chip(adapter)) +				adapter->hw_error = true; +			for (i = 0; ue_lo; ue_lo >>= 1, i++) { +				if (ue_lo & 1) +					dev_err(dev, "UE: %s bit set\n", +						ue_status_low_desc[i]); +			} +			for (i = 0; ue_hi; ue_hi >>= 1, i++) { +				if (ue_hi & 1) +					dev_err(dev, "UE: %s bit set\n", +						ue_status_hi_desc[i]); +			}  		}  	} - +	if (error_detected) +		netif_carrier_off(netdev);  }  static void be_msix_disable(struct be_adapter *adapter) @@ -2353,7 +2619,7 @@ static void be_msix_disable(struct be_adapter *adapter)  static int be_msix_enable(struct be_adapter *adapter)  { -	int i, status, num_vec; +	int i, num_vec;  	struct device *dev = &adapter->pdev->dev;  	/* If RoCE is supported, program the max number of NIC vectors that @@ -2369,24 +2635,11 @@ static int be_msix_enable(struct be_adapter *adapter)  	for (i = 0; i < num_vec; i++)  		adapter->msix_entries[i].entry = i; -	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); -	if (status == 0) { -		goto done; -	} else if (status >= MIN_MSIX_VECTORS) { -		num_vec = status; -		status = pci_enable_msix(adapter->pdev, adapter->msix_entries, -					 num_vec); -		if (!status) -			goto done; -	} +	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, +					MIN_MSIX_VECTORS, num_vec); +	if (num_vec < 0) +		goto fail; -	dev_warn(dev, "MSIx enable failed\n"); - -	/* INTx is not supported in VFs, so fail probe if enable_msix fails */ -	if (!be_physfn(adapter)) -		return status; -	return 0; -done:  	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {  		adapter->num_msix_roce_vec = num_vec / 2;  		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n", @@ -2398,10 +2651,18 @@ done:  	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",  		 adapter->num_msix_vec);  	return 0; + +fail: +	dev_warn(dev, "MSIx enable failed\n"); + +	/* INTx is not supported in VFs, so fail probe if enable_msix fails */ +	if (!be_physfn(adapter)) +		return num_vec; +	return 0;  }  static inline int be_msix_vec_get(struct be_adapter *adapter, -				struct be_eq_obj *eqo) +				  struct be_eq_obj *eqo)  {  	return adapter->msix_entries[eqo->msix_idx].vector;  } @@ -2425,7 +2686,7 @@ err_msix:  	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)  		free_irq(be_msix_vec_get(adapter, eqo), eqo);  	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", -		status); +		 status);  	be_msix_disable(adapter);  	return status;  } @@ -2503,11 +2764,19 @@ static int be_close(struct net_device *netdev)  	struct be_eq_obj *eqo;  	int i; +	/* This protection is needed as be_close() may be called even when the +	 * adapter is in cleared state (after eeh perm failure) +	 */ +	if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) +		return 0; +  	be_roce_dev_close(adapter);  	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { -		for_all_evt_queues(adapter, eqo, i) +		for_all_evt_queues(adapter, eqo, i) {  			napi_disable(&eqo->napi); +			be_disable_busy_poll(eqo); +		}  		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;  	} @@ -2521,6 +2790,11 @@ static int be_close(struct net_device *netdev)  	be_rx_qs_destroy(adapter); +	for (i = 1; i < (adapter->uc_macs + 1); i++) +		be_cmd_pmac_del(adapter, 
adapter->if_handle, +				adapter->pmac_id[i], 0); +	adapter->uc_macs = 0; +  	for_all_evt_queues(adapter, eqo, i) {  		if (msix_enabled(adapter))  			synchronize_irq(be_msix_vec_get(adapter, eqo)); @@ -2538,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)  {  	struct be_rx_obj *rxo;  	int rc, i, j; -	u8 rsstable[128]; +	u8 rss_hkey[RSS_HASH_KEY_LEN]; +	struct rss_info *rss = &adapter->rss_info;  	for_all_rx_queues(adapter, rxo, i) {  		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, @@ -2563,28 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)  	}  	if (be_multi_rxq(adapter)) { -		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { +		for (j = 0; j < RSS_INDIR_TABLE_LEN; +			j += adapter->num_rx_qs - 1) {  			for_all_rss_queues(adapter, rxo, i) { -				if ((j + i) >= 128) +				if ((j + i) >= RSS_INDIR_TABLE_LEN)  					break; -				rsstable[j + i] = rxo->rss_id; +				rss->rsstable[j + i] = rxo->rss_id; +				rss->rss_queue[j + i] = i;  			}  		} -		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | -					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6; +		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | +			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;  		if (!BEx_chip(adapter)) -			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | -						RSS_ENABLE_UDP_IPV6; +			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 | +				RSS_ENABLE_UDP_IPV6; +	} else { +		/* Disable RSS, if only default RX Q is created */ +		rss->rss_flags = RSS_ENABLE_NONE; +	} -		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, -				       128); -		if (rc) { -			adapter->rss_flags = 0; -			return rc; -		} +	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN); +	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, +			       128, rss_hkey); +	if (rc) { +		rss->rss_flags = RSS_ENABLE_NONE; +		return rc;  	} +	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN); +  	/* First time posting */  	for_all_rx_queues(adapter, rxo, i)  		be_post_rx_frags(rxo, GFP_KERNEL); @@ -2618,7 +2901,8 @@ static int be_open(struct net_device *netdev)  	for_all_evt_queues(adapter, eqo, i) {  		napi_enable(&eqo->napi); -		be_eq_notify(adapter, eqo->q.id, true, false, 0); +		be_enable_busy_poll(eqo); +		be_eq_notify(adapter, eqo->q.id, true, true, 0);  	}  	adapter->flags |= BE_FLAGS_NAPI_ENABLED; @@ -2628,6 +2912,12 @@ static int be_open(struct net_device *netdev)  	netif_tx_start_all_queues(netdev);  	be_roce_dev_open(adapter); + +#ifdef CONFIG_BE2NET_VXLAN +	if (skyhawk_chip(adapter)) +		vxlan_get_rx_port(netdev); +#endif +  	return 0;  err:  	be_close(adapter->netdev); @@ -2650,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)  	if (enable) {  		status = pci_write_config_dword(adapter->pdev, -			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); +						PCICFG_PM_CONTROL_OFFSET, +						PCICFG_PM_CONTROL_MASK);  		if (status) {  			dev_err(&adapter->pdev->dev,  				"Could not enable Wake-on-lan\n"); @@ -2659,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)  			return status;  		}  		status = be_cmd_enable_magic_wol(adapter, -				adapter->netdev->dev_addr, &cmd); +						 adapter->netdev->dev_addr, +						 &cmd);  		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);  		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);  	} else { @@ -2698,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)  		if (status)  			dev_err(&adapter->pdev->dev, -			"Mac address assignment failed for VF %d\n", vf); +				"Mac address assignment failed for VF %d\n", +				vf);  		
else  			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); @@ -2712,14 +3005,11 @@ static int be_vfs_mac_query(struct be_adapter *adapter)  	int status, vf;  	u8 mac[ETH_ALEN];  	struct be_vf_cfg *vf_cfg; -	bool active = false;  	for_all_vfs(adapter, vf_cfg, vf) { -		be_cmd_get_mac_from_list(adapter, mac, &active, -					 &vf_cfg->pmac_id, 0); - -		status = be_cmd_mac_addr_query(adapter, mac, false, -					       vf_cfg->if_handle, 0); +		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id, +					       mac, vf_cfg->if_handle, +					       false, vf+1);  		if (status)  			return status;  		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN); @@ -2771,29 +3061,55 @@ static void be_cancel_worker(struct be_adapter *adapter)  	}  } -static int be_clear(struct be_adapter *adapter) +static void be_mac_clear(struct be_adapter *adapter)  {  	int i; +	if (adapter->pmac_id) { +		for (i = 0; i < (adapter->uc_macs + 1); i++) +			be_cmd_pmac_del(adapter, adapter->if_handle, +					adapter->pmac_id[i], 0); +		adapter->uc_macs = 0; + +		kfree(adapter->pmac_id); +		adapter->pmac_id = NULL; +	} +} + +#ifdef CONFIG_BE2NET_VXLAN +static void be_disable_vxlan_offloads(struct be_adapter *adapter) +{ +	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) +		be_cmd_manage_iface(adapter, adapter->if_handle, +				    OP_CONVERT_TUNNEL_TO_NORMAL); + +	if (adapter->vxlan_port) +		be_cmd_set_vxlan_port(adapter, 0); + +	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS; +	adapter->vxlan_port = 0; +} +#endif + +static int be_clear(struct be_adapter *adapter) +{  	be_cancel_worker(adapter);  	if (sriov_enabled(adapter))  		be_vf_clear(adapter); +#ifdef CONFIG_BE2NET_VXLAN +	be_disable_vxlan_offloads(adapter); +#endif  	/* delete the primary mac along with the uc-mac list */ -	for (i = 0; i < (adapter->uc_macs + 1); i++) -		be_cmd_pmac_del(adapter, adapter->if_handle, -				adapter->pmac_id[i], 0); -	adapter->uc_macs = 0; +	be_mac_clear(adapter);  	be_cmd_if_destroy(adapter, adapter->if_handle,  0);  	be_clear_queues(adapter); -	kfree(adapter->pmac_id); -	adapter->pmac_id = NULL; -  	be_msix_disable(adapter); +	adapter->flags &= ~BE_FLAGS_SETUP_DONE;  	return 0;  } @@ -2802,7 +3118,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)  	struct be_resources res = {0};  	struct be_vf_cfg *vf_cfg;  	u32 cap_flags, en_flags, vf; -	int status; +	int status = 0;  	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |  		    BE_IF_FLAGS_MULTICAST; @@ -2817,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)  		/* If a FW profile exists, then cap_flags are updated */  		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | -			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST); -		status = be_cmd_if_create(adapter, cap_flags, en_flags, -					  &vf_cfg->if_handle, vf + 1); +					BE_IF_FLAGS_BROADCAST | +					BE_IF_FLAGS_MULTICAST); +		status = +		    be_cmd_if_create(adapter, cap_flags, en_flags, +				     &vf_cfg->if_handle, vf + 1);  		if (status)  			goto err;  	} @@ -2846,10 +3164,9 @@ static int be_vf_setup_init(struct be_adapter *adapter)  static int be_vf_setup(struct be_adapter *adapter)  { +	struct device *dev = &adapter->pdev->dev;  	struct be_vf_cfg *vf_cfg; -	u16 def_vlan, lnk_speed;  	int status, old_vfs, vf; -	struct device *dev = &adapter->pdev->dev;  	u32 privileges;  	old_vfs = pci_num_vf(adapter->pdev); @@ -2906,24 +3223,16 @@ static int be_vf_setup(struct be_adapter *adapter)  					 vf);  		} -		/* BE3 FW, by default, caps VF TX-rate to 100mbps. 
-		 * Allow full available bandwidth -		 */ -		if (BE3_chip(adapter) && !old_vfs) -			be_cmd_set_qos(adapter, 1000, vf+1); +		/* Allow full available bandwidth */ +		if (!old_vfs) +			be_cmd_config_qos(adapter, 0, 0, vf + 1); -		status = be_cmd_link_status_query(adapter, &lnk_speed, -						  NULL, vf + 1); -		if (!status) -			vf_cfg->tx_rate = lnk_speed; - -		status = be_cmd_get_hsw_config(adapter, &def_vlan, -					       vf + 1, vf_cfg->if_handle, NULL); -		if (status) -			goto err; -		vf_cfg->def_vid = def_vlan; - -		be_cmd_enable_vf(adapter, vf + 1); +		if (!old_vfs) { +			be_cmd_enable_vf(adapter, vf + 1); +			be_cmd_set_logical_link_config(adapter, +						       IFLA_VF_LINK_STATE_AUTO, +						       vf+1); +		}  	}  	if (!old_vfs) { @@ -2941,19 +3250,38 @@ err:  	return status;  } +/* Converting function_mode bits on BE3 to SH mc_type enums */ + +static u8 be_convert_mc_type(u32 function_mode) +{ +	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE) +		return vNIC1; +	else if (function_mode & QNQ_MODE) +		return FLEX10; +	else if (function_mode & VNIC_MODE) +		return vNIC2; +	else if (function_mode & UMC_ENABLED) +		return UMC; +	else +		return MC_NONE; +} +  /* On BE2/BE3 FW does not suggest the supported limits */  static void BEx_get_resources(struct be_adapter *adapter,  			      struct be_resources *res)  {  	struct pci_dev *pdev = adapter->pdev;  	bool use_sriov = false; - -	if (BE3_chip(adapter) && be_physfn(adapter)) { -		int max_vfs; - -		max_vfs = pci_sriov_get_totalvfs(pdev); -		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; -		use_sriov = res->max_vfs && num_vfs; +	int max_vfs = 0; + +	if (be_physfn(adapter) && BE3_chip(adapter)) { +		be_cmd_get_profile_config(adapter, res, 0); +		/* Some old versions of BE3 FW don't report max_vfs value */ +		if (res->max_vfs == 0) { +			max_vfs = pci_sriov_get_totalvfs(pdev); +			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; +		} +		use_sriov = res->max_vfs && sriov_want(adapter);  	}  	if (be_physfn(adapter)) @@ -2961,14 +3289,32 @@ static void BEx_get_resources(struct be_adapter *adapter,  	else  		res->max_uc_mac = BE_VF_UC_PMAC_COUNT; -	if (adapter->function_mode & FLEX10_MODE) -		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; -	else +	adapter->mc_type = be_convert_mc_type(adapter->function_mode); + +	if (be_is_mc(adapter)) { +		/* Assuming that there are 4 channels per port, +		 * when multi-channel is enabled +		 */ +		if (be_is_qnq_mode(adapter)) +			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; +		else +			/* In a non-qnq multichannel mode, the pvid +			 * takes up one vlan entry +			 */ +			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1; +	} else {  		res->max_vlans = BE_NUM_VLANS_SUPPORTED; +	} +  	res->max_mcast_mac = BE_MAX_MC; -	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || -	    !be_physfn(adapter)) +	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs +	 * 2) Create multiple TX rings on a BE3-R multi-channel interface +	 *    *only* if it is RSS-capable. +	 */ +	if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) || +	    !be_physfn(adapter) || (be_is_mc(adapter) && +	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))  		res->max_tx_qs = 1;  	else  		res->max_tx_qs = BE3_MAX_TX_QS; @@ -2979,7 +3325,11 @@ static void BEx_get_resources(struct be_adapter *adapter,  					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;  	res->max_rx_qs = res->max_rss_qs + 1; -	res->max_evt_qs = be_physfn(adapter) ? 
BE3_MAX_EVT_QS : 1; +	if (be_physfn(adapter)) +		res->max_evt_qs = (res->max_vfs > 0) ? +					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; +	else +		res->max_evt_qs = 1;  	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;  	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) @@ -3010,14 +3360,6 @@ static int be_get_resources(struct be_adapter *adapter)  		adapter->res = res;  	} -	/* For BE3 only check if FW suggests a different max-txqs value */ -	if (BE3_chip(adapter)) { -		status = be_cmd_get_profile_config(adapter, &res, 0); -		if (!status && res.max_tx_qs) -			adapter->res.max_tx_qs = -				min(adapter->res.max_tx_qs, res.max_tx_qs); -	} -  	/* For Lancer, SH etc read per-function resource limits from FW.  	 * GET_FUNC_CONFIG returns per function guaranteed limits.  	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits @@ -3054,6 +3396,7 @@ static int be_get_resources(struct be_adapter *adapter)  /* Routine to query per function resource limits */  static int be_get_config(struct be_adapter *adapter)  { +	u16 profile_id;  	int status;  	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num, @@ -3063,13 +3406,19 @@ static int be_get_config(struct be_adapter *adapter)  	if (status)  		return status; +	 if (be_physfn(adapter)) { +		status = be_cmd_get_active_profile(adapter, &profile_id); +		if (!status) +			dev_info(&adapter->pdev->dev, +				 "Using profile 0x%x\n", profile_id); +	} +  	status = be_get_resources(adapter);  	if (status)  		return status; -	/* primary mac needs 1 pmac entry */ -	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32), -				   GFP_KERNEL); +	adapter->pmac_id = kcalloc(be_max_uc(adapter), +				   sizeof(*adapter->pmac_id), GFP_KERNEL);  	if (!adapter->pmac_id)  		return -ENOMEM; @@ -3096,12 +3445,10 @@ static int be_mac_setup(struct be_adapter *adapter)  		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);  	} -	/* On BE3 VFs this cmd may fail due to lack of privilege. -	 * Ignore the failure as in this case pmac_id is fetched -	 * in the IFACE_CREATE cmd. -	 */ -	be_cmd_pmac_add(adapter, mac, adapter->if_handle, -			&adapter->pmac_id[0], 0); +	/* For BE3-R VFs, the PF programs the initial MAC address */ +	if (!(BEx_chip(adapter) && be_virtfn(adapter))) +		be_cmd_pmac_add(adapter, mac, adapter->if_handle, +				&adapter->pmac_id[0], 0);  	return 0;  } @@ -3219,11 +3566,6 @@ static int be_setup(struct be_adapter *adapter)  		goto err;  	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0); -	/* In UMC mode FW does not return right privileges. -	 * Override with correct privilege equivalent to PF. 
-	 */ -	if (be_is_mc(adapter)) -		adapter->cmd_privileges = MAX_PRIVILEGES;  	status = be_mac_setup(adapter);  	if (status) @@ -3231,18 +3573,30 @@ static int be_setup(struct be_adapter *adapter)  	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash); +	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) { +		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.", +			adapter->fw_ver); +		dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); +	} +  	if (adapter->vlans_added)  		be_vid_config(adapter);  	be_set_rx_mode(adapter->netdev); +	be_cmd_get_acpi_wol_cap(adapter); +  	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);  	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)  		be_cmd_set_flow_control(adapter, adapter->tx_fc,  					adapter->rx_fc); -	if (be_physfn(adapter) && num_vfs) { +	if (be_physfn(adapter)) +		be_cmd_set_logical_link_config(adapter, +					       IFLA_VF_LINK_STATE_AUTO, 0); + +	if (sriov_want(adapter)) {  		if (be_max_vfs(adapter))  			be_vf_setup(adapter);  		else @@ -3254,6 +3608,7 @@ static int be_setup(struct be_adapter *adapter)  		adapter->phy.fc_autoneg = 1;  	be_schedule_worker(adapter); +	adapter->flags |= BE_FLAGS_SETUP_DONE;  	return 0;  err:  	be_clear(adapter); @@ -3276,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)  }  #endif -#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. " -static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "}; - -static bool be_flash_redboot(struct be_adapter *adapter, -			const u8 *p, u32 img_start, int image_size, -			int hdr_size) -{ -	u32 crc_offset; -	u8 flashed_crc[4]; -	int status; - -	crc_offset = hdr_size + img_start + image_size - 4; - -	p += crc_offset; - -	status = be_cmd_get_flash_crc(adapter, flashed_crc, -			(image_size - 4)); -	if (status) { -		dev_err(&adapter->pdev->dev, -		"could not get crc from flash, not flashing redboot\n"); -		return false; -	} - -	/*update redboot only if crc does not match*/ -	if (!memcmp(flashed_crc, p, 4)) -		return false; -	else -		return true; -} +static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};  static bool phy_flashing_required(struct be_adapter *adapter)  { @@ -3335,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,  }  static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, -					 int header_size, -					 const struct firmware *fw) +						int header_size, +						const struct firmware *fw)  {  	struct flash_section_info *fsec = NULL;  	const u8 *p = fw->data; @@ -3351,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,  	return NULL;  } +static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, +			      u32 img_offset, u32 img_size, int hdr_size, +			      u16 img_optype, bool *crc_match) +{ +	u32 crc_offset; +	int status; +	u8 crc[4]; + +	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4); +	if (status) +		return status; + +	crc_offset = hdr_size + img_offset + img_size - 4; + +	/* Skip flashing, if crc of flashed region matches */ +	if (!memcmp(crc, p + crc_offset, 4)) +		*crc_match = true; +	else +		*crc_match = false; + +	return status; +} +  static int be_flash(struct be_adapter *adapter, const u8 *img, -		struct be_dma_mem *flash_cmd, int optype, int img_size) +		    struct be_dma_mem *flash_cmd, int optype, int img_size)  { -	u32 total_bytes = 0, flash_op, num_bytes = 0; -	int status = 0;  	struct be_cmd_write_flashrom *req = flash_cmd->va; +	u32 total_bytes, flash_op, 
num_bytes; +	int status;  	total_bytes = img_size;  	while (total_bytes) { @@ -3379,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,  		memcpy(req->data_buf, img, num_bytes);  		img += num_bytes;  		status = be_cmd_write_flashrom(adapter, flash_cmd, optype, -						flash_op, num_bytes); -		if (status) { -			if (status == ILLEGAL_IOCTL_REQ && -			    optype == OPTYPE_PHY_FW) -				break; -			dev_err(&adapter->pdev->dev, -				"cmd to write to flash rom failed.\n"); +					       flash_op, num_bytes); +		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && +		    optype == OPTYPE_PHY_FW) +			break; +		else if (status)  			return status; -		}  	}  	return 0;  }  /* For BE2, BE3 and BE3-R */  static int be_flash_BEx(struct be_adapter *adapter, -			 const struct firmware *fw, -			 struct be_dma_mem *flash_cmd, -			 int num_of_images) - +			const struct firmware *fw, +			struct be_dma_mem *flash_cmd, int num_of_images)  { -	int status = 0, i, filehdr_size = 0;  	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); -	const u8 *p = fw->data; -	const struct flash_comp *pflashcomp; -	int num_comp, redboot; +	struct device *dev = &adapter->pdev->dev;  	struct flash_section_info *fsec = NULL; +	int status, i, filehdr_size, num_comp; +	const struct flash_comp *pflashcomp; +	bool crc_match; +	const u8 *p;  	struct flash_comp gen3_flash_types[] = {  		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE, @@ -3461,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,  	/* Get flash section info*/  	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);  	if (!fsec) { -		dev_err(&adapter->pdev->dev, -			"Invalid Cookie. UFI corrupted ?\n"); +		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");  		return -1;  	}  	for (i = 0; i < num_comp; i++) { @@ -3478,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,  				continue;  		if (pflashcomp[i].optype == OPTYPE_REDBOOT) { -			redboot = be_flash_redboot(adapter, fw->data, -				pflashcomp[i].offset, pflashcomp[i].size, -				filehdr_size + img_hdrs_size); -			if (!redboot) +			status = be_check_flash_crc(adapter, fw->data, +						    pflashcomp[i].offset, +						    pflashcomp[i].size, +						    filehdr_size + +						    img_hdrs_size, +						    OPTYPE_REDBOOT, &crc_match); +			if (status) { +				dev_err(dev, +					"Could not get CRC for 0x%x region\n", +					pflashcomp[i].optype); +				continue; +			} + +			if (crc_match)  				continue;  		} -		p = fw->data; -		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size; +		p = fw->data + filehdr_size + pflashcomp[i].offset + +			img_hdrs_size;  		if (p + pflashcomp[i].size > fw->data + fw->size)  			return -1;  		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, -					pflashcomp[i].size); +				  pflashcomp[i].size);  		if (status) { -			dev_err(&adapter->pdev->dev, -				"Flashing section type %d failed.\n", +			dev_err(dev, "Flashing section type 0x%x failed\n",  				pflashcomp[i].img_type);  			return status;  		} @@ -3502,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,  	return 0;  } +static u16 be_get_img_optype(struct flash_section_entry fsec_entry) +{ +	u32 img_type = le32_to_cpu(fsec_entry.type); +	u16 img_optype = le16_to_cpu(fsec_entry.optype); + +	if (img_optype != 0xFFFF) +		return img_optype; + +	switch (img_type) { +	case IMAGE_FIRMWARE_iSCSI: +		img_optype = OPTYPE_ISCSI_ACTIVE; +		break; +	case IMAGE_BOOT_CODE: +		img_optype = OPTYPE_REDBOOT; +		break; +	case 
IMAGE_OPTION_ROM_ISCSI: +		img_optype = OPTYPE_BIOS; +		break; +	case IMAGE_OPTION_ROM_PXE: +		img_optype = OPTYPE_PXE_BIOS; +		break; +	case IMAGE_OPTION_ROM_FCoE: +		img_optype = OPTYPE_FCOE_BIOS; +		break; +	case IMAGE_FIRMWARE_BACKUP_iSCSI: +		img_optype = OPTYPE_ISCSI_BACKUP; +		break; +	case IMAGE_NCSI: +		img_optype = OPTYPE_NCSI_FW; +		break; +	case IMAGE_FLASHISM_JUMPVECTOR: +		img_optype = OPTYPE_FLASHISM_JUMPVECTOR; +		break; +	case IMAGE_FIRMWARE_PHY: +		img_optype = OPTYPE_SH_PHY_FW; +		break; +	case IMAGE_REDBOOT_DIR: +		img_optype = OPTYPE_REDBOOT_DIR; +		break; +	case IMAGE_REDBOOT_CONFIG: +		img_optype = OPTYPE_REDBOOT_CONFIG; +		break; +	case IMAGE_UFI_DIR: +		img_optype = OPTYPE_UFI_DIR; +		break; +	default: +		break; +	} + +	return img_optype; +} +  static int be_flash_skyhawk(struct be_adapter *adapter, -		const struct firmware *fw, -		struct be_dma_mem *flash_cmd, int num_of_images) +			    const struct firmware *fw, +			    struct be_dma_mem *flash_cmd, int num_of_images)  { -	int status = 0, i, filehdr_size = 0; -	int img_offset, img_size, img_optype, redboot;  	int img_hdrs_size = num_of_images * sizeof(struct image_hdr); -	const u8 *p = fw->data; +	struct device *dev = &adapter->pdev->dev;  	struct flash_section_info *fsec = NULL; +	u32 img_offset, img_size, img_type; +	int status, i, filehdr_size; +	bool crc_match, old_fw_img; +	u16 img_optype; +	const u8 *p;  	filehdr_size = sizeof(struct flash_file_hdr_g3);  	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);  	if (!fsec) { -		dev_err(&adapter->pdev->dev, -			"Invalid Cookie. UFI corrupted ?\n"); +		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");  		return -1;  	}  	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {  		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);  		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size); +		img_type   = le32_to_cpu(fsec->fsec_entry[i].type); +		img_optype = be_get_img_optype(fsec->fsec_entry[i]); +		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; -		switch (le32_to_cpu(fsec->fsec_entry[i].type)) { -		case IMAGE_FIRMWARE_iSCSI: -			img_optype = OPTYPE_ISCSI_ACTIVE; -			break; -		case IMAGE_BOOT_CODE: -			img_optype = OPTYPE_REDBOOT; -			break; -		case IMAGE_OPTION_ROM_ISCSI: -			img_optype = OPTYPE_BIOS; -			break; -		case IMAGE_OPTION_ROM_PXE: -			img_optype = OPTYPE_PXE_BIOS; -			break; -		case IMAGE_OPTION_ROM_FCoE: -			img_optype = OPTYPE_FCOE_BIOS; -			break; -		case IMAGE_FIRMWARE_BACKUP_iSCSI: -			img_optype = OPTYPE_ISCSI_BACKUP; -			break; -		case IMAGE_NCSI: -			img_optype = OPTYPE_NCSI_FW; -			break; -		default: +		if (img_optype == 0xFFFF)  			continue; +		/* Don't bother verifying CRC if an old FW image is being +		 * flashed +		 */ +		if (old_fw_img) +			goto flash; + +		status = be_check_flash_crc(adapter, fw->data, img_offset, +					    img_size, filehdr_size + +					    img_hdrs_size, img_optype, +					    &crc_match); +		/* The current FW image on the card does not recognize the new +		 * FLASH op_type. The FW download is partially complete. +		 * Reboot the server now to enable FW image to recognize the +		 * new FLASH op_type. To complete the remaining process, +		 * download the same FW again after the reboot. +		 */ +		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || +		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { +			dev_err(dev, "Flash incomplete. 
Reset the server\n"); +			dev_err(dev, "Download FW image again after reset\n"); +			return -EAGAIN; +		} else if (status) { +			dev_err(dev, "Could not get CRC for 0x%x region\n", +				img_optype); +			return -EFAULT;  		} -		if (img_optype == OPTYPE_REDBOOT) { -			redboot = be_flash_redboot(adapter, fw->data, -					img_offset, img_size, -					filehdr_size + img_hdrs_size); -			if (!redboot) -				continue; -		} +		if (crc_match) +			continue; -		p = fw->data; -		p += filehdr_size + img_offset + img_hdrs_size; +flash: +		p = fw->data + filehdr_size + img_offset + img_hdrs_size;  		if (p + img_size > fw->data + fw->size)  			return -1;  		status = be_flash(adapter, p, flash_cmd, img_optype, img_size); -		if (status) { -			dev_err(&adapter->pdev->dev, -				"Flashing section type %d failed.\n", -				fsec->fsec_entry[i].type); -			return status; +		/* For old FW images ignore ILLEGAL_FIELD error or errors on +		 * UFI_DIR region +		 */ +		if (old_fw_img && +		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || +		     (img_optype == OPTYPE_UFI_DIR && +		      base_status(status) == MCC_STATUS_FAILED))) { +			continue; +		} else if (status) { +			dev_err(dev, "Flashing section type 0x%x failed\n", +				img_type); +			return -EFAULT;  		}  	}  	return 0;  }  static int lancer_fw_download(struct be_adapter *adapter, -				const struct firmware *fw) +			      const struct firmware *fw)  {  #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)  #define LANCER_FW_DOWNLOAD_LOCATION   "/prg" @@ -3641,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,  	}  	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, -				flash_cmd.dma); +			  flash_cmd.dma);  	if (status) {  		dev_err(&adapter->pdev->dev,  			"Firmware load error. " @@ -3651,6 +4067,8 @@ static int lancer_fw_download(struct be_adapter *adapter,  	}  	if (change_status == LANCER_FW_RESET_NEEDED) { +		dev_info(&adapter->pdev->dev, +			 "Resetting adapter to activate new FW\n");  		status = lancer_physdev_ctrl(adapter,  					     PHYSDEV_CONTROL_FW_RESET_MASK);  		if (status) { @@ -3660,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,  			goto lancer_fw_exit;  		}  	} else if (change_status != LANCER_NO_RESET_NEEDED) { -			dev_err(&adapter->pdev->dev, -				"System reboot required for new FW" -				" to be active\n"); +		dev_err(&adapter->pdev->dev, +			"System reboot required for new FW to be active\n");  	}  	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); @@ -3726,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)  			switch (ufi_type) {  			case UFI_TYPE4:  				status = be_flash_skyhawk(adapter, fw, -							&flash_cmd, num_imgs); +							  &flash_cmd, num_imgs);  				break;  			case UFI_TYPE3R:  				status = be_flash_BEx(adapter, fw, &flash_cmd, @@ -3796,8 +4213,7 @@ fw_exit:  	return status;  } -static int be_ndo_bridge_setlink(struct net_device *dev, -				    struct nlmsghdr *nlh) +static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)  {  	struct be_adapter *adapter = netdev_priv(dev);  	struct nlattr *attr, *br_spec; @@ -3839,8 +4255,7 @@ err:  }  static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, -				    struct net_device *dev, -				    u32 filter_mask) +				 struct net_device *dev, u32 filter_mask)  {  	struct be_adapter *adapter = netdev_priv(dev);  	int status = 0; @@ -3864,6 +4279,67 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,  				       
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);  } +#ifdef CONFIG_BE2NET_VXLAN +static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, +			      __be16 port) +{ +	struct be_adapter *adapter = netdev_priv(netdev); +	struct device *dev = &adapter->pdev->dev; +	int status; + +	if (lancer_chip(adapter) || BEx_chip(adapter)) +		return; + +	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { +		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n", +			 be16_to_cpu(port)); +		dev_info(dev, +			 "Only one UDP port supported for VxLAN offloads\n"); +		return; +	} + +	status = be_cmd_manage_iface(adapter, adapter->if_handle, +				     OP_CONVERT_NORMAL_TO_TUNNEL); +	if (status) { +		dev_warn(dev, "Failed to convert normal interface to tunnel\n"); +		goto err; +	} + +	status = be_cmd_set_vxlan_port(adapter, port); +	if (status) { +		dev_warn(dev, "Failed to add VxLAN port\n"); +		goto err; +	} +	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; +	adapter->vxlan_port = port; + +	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", +		 be16_to_cpu(port)); +	return; +err: +	be_disable_vxlan_offloads(adapter); +	return; +} + +static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, +			      __be16 port) +{ +	struct be_adapter *adapter = netdev_priv(netdev); + +	if (lancer_chip(adapter) || BEx_chip(adapter)) +		return; + +	if (adapter->vxlan_port != port) +		return; + +	be_disable_vxlan_offloads(adapter); + +	dev_info(&adapter->pdev->dev, +		 "Disabled VxLAN offloads for UDP port %d\n", +		 be16_to_cpu(port)); +} +#endif +  static const struct net_device_ops be_netdev_ops = {  	.ndo_open		= be_open,  	.ndo_stop		= be_close, @@ -3877,19 +4353,33 @@ static const struct net_device_ops be_netdev_ops = {  	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,  	.ndo_set_vf_mac		= be_set_vf_mac,  	.ndo_set_vf_vlan	= be_set_vf_vlan, -	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate, +	.ndo_set_vf_rate	= be_set_vf_tx_rate,  	.ndo_get_vf_config	= be_get_vf_config, +	.ndo_set_vf_link_state  = be_set_vf_link_state,  #ifdef CONFIG_NET_POLL_CONTROLLER  	.ndo_poll_controller	= be_netpoll,  #endif  	.ndo_bridge_setlink	= be_ndo_bridge_setlink,  	.ndo_bridge_getlink	= be_ndo_bridge_getlink, +#ifdef CONFIG_NET_RX_BUSY_POLL +	.ndo_busy_poll		= be_busy_poll, +#endif +#ifdef CONFIG_BE2NET_VXLAN +	.ndo_add_vxlan_port	= be_add_vxlan_port, +	.ndo_del_vxlan_port	= be_del_vxlan_port, +#endif  };  static void be_netdev_init(struct net_device *netdev)  {  	struct be_adapter *adapter = netdev_priv(netdev); +	if (skyhawk_chip(adapter)) { +		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | +					   NETIF_F_TSO | NETIF_F_TSO6 | +					   NETIF_F_GSO_UDP_TUNNEL; +		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; +	}  	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |  		NETIF_F_HW_VLAN_CTAG_TX; @@ -3910,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)  	netdev->netdev_ops = &be_netdev_ops; -	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); +	netdev->ethtool_ops = &be_ethtool_ops;  }  static void be_unmap_pci_bars(struct be_adapter *adapter) @@ -3944,11 +4434,6 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)  static int be_map_pci_bars(struct be_adapter *adapter)  {  	u8 __iomem *addr; -	u32 sli_intf; - -	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); -	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> -				SLI_INTF_IF_TYPE_SHIFT;  	if (BEx_chip(adapter) && be_physfn(adapter)) {  
		adapter->csr = pci_iomap(adapter->pdev, 2, 0); @@ -4029,7 +4514,7 @@ static int be_ctrl_init(struct be_adapter *adapter)  	spin_lock_init(&adapter->mcc_lock);  	spin_lock_init(&adapter->mcc_cq_lock); -	init_completion(&adapter->flash_compl); +	init_completion(&adapter->et_cmd_compl);  	pci_save_state(adapter->pdev);  	return 0; @@ -4061,9 +4546,11 @@ static int be_stats_init(struct be_adapter *adapter)  		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);  	else if (BE2_chip(adapter))  		cmd->size = sizeof(struct be_cmd_req_get_stats_v0); -	else -		/* BE3 and Skyhawk */ +	else if (BE3_chip(adapter))  		cmd->size = sizeof(struct be_cmd_req_get_stats_v1); +	else +		/* ALL non-BE ASICs */ +		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);  	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,  				      GFP_KERNEL); @@ -4097,81 +4584,28 @@ static void be_remove(struct pci_dev *pdev)  	pci_disable_pcie_error_reporting(pdev); -	pci_set_drvdata(pdev, NULL);  	pci_release_regions(pdev);  	pci_disable_device(pdev);  	free_netdev(adapter->netdev);  } -bool be_is_wol_supported(struct be_adapter *adapter) -{ -	return ((adapter->wol_cap & BE_WOL_CAP) && -		!be_is_wol_excluded(adapter)) ? true : false; -} - -u32 be_get_fw_log_level(struct be_adapter *adapter) -{ -	struct be_dma_mem extfat_cmd; -	struct be_fat_conf_params *cfgs; -	int status; -	u32 level = 0; -	int j; - -	if (lancer_chip(adapter)) -		return 0; - -	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); -	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); -	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, -					     &extfat_cmd.dma); - -	if (!extfat_cmd.va) { -		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", -			__func__); -		goto err; -	} - -	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); -	if (!status) { -		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + -						sizeof(struct be_cmd_resp_hdr)); -		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { -			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) -				level = cfgs->module[0].trace_lvl[j].dbg_lvl; -		} -	} -	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, -			    extfat_cmd.dma); -err: -	return level; -} -  static int be_get_initial_config(struct be_adapter *adapter)  { -	int status; -	u32 level; +	int status, level;  	status = be_cmd_get_cntl_attributes(adapter);  	if (status)  		return status; -	status = be_cmd_get_acpi_wol_cap(adapter); -	if (status) { -		/* in case of a failure to get wol capabillities -		 * check the exclusion list to determine WOL capability */ -		if (!be_is_wol_excluded(adapter)) -			adapter->wol_cap |= BE_WOL_CAP; -	} - -	if (be_is_wol_supported(adapter)) -		adapter->wol = true; -  	/* Must be a power of 2 or else MODULO will BUG_ON */  	adapter->be_get_temp_freq = 64; -	level = be_get_fw_log_level(adapter); -	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0; +	if (BEx_chip(adapter)) { +		level = be_cmd_get_fw_log_level(adapter); +		adapter->msg_enable = +			level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; +	}  	adapter->cfg_num_qs = netif_get_num_default_rss_queues();  	return 0; @@ -4203,13 +4637,13 @@ static int lancer_recover_func(struct be_adapter *adapter)  			goto err;  	} -	dev_err(dev, "Error recovery successful\n"); +	dev_err(dev, "Adapter recovery successful\n");  	return 0;  err:  	if (status == -EAGAIN)  		dev_err(dev, "Waiting for resource provisioning\n");  	else -		dev_err(dev, "Error recovery failed\n"); +		dev_err(dev, "Adapter recovery failed\n");  	return status;  } @@ -4246,7 +4680,6 @@ static void be_worker(struct work_struct *work)  	struct be_adapter *adapter =  		container_of(work, struct be_adapter, work.work);  	struct be_rx_obj *rxo; -	struct be_eq_obj *eqo;  	int i;  	/* when interrupts are not yet enabled, just reap any pending @@ -4271,14 +4704,14 @@ static void be_worker(struct work_struct *work)  		be_cmd_get_die_temperature(adapter);  	for_all_rx_queues(adapter, rxo, i) { -		if (rxo->rx_post_starved) { -			rxo->rx_post_starved = false; +		/* Replenish RX-queues starved due to memory +		 * allocation failures. +		 */ +		if (rxo->rx_post_starved)  			be_post_rx_frags(rxo, GFP_KERNEL); -		}  	} -	for_all_evt_queues(adapter, eqo, i) -		be_eqd_update(adapter, eqo); +	be_eqd_update(adapter);  reschedule:  	adapter->work_counter++; @@ -4293,14 +4726,32 @@ static bool be_reset_required(struct be_adapter *adapter)  static char *mc_name(struct be_adapter *adapter)  { -	if (adapter->function_mode & FLEX10_MODE) -		return "FLEX10"; -	else if (adapter->function_mode & VNIC_MODE) -		return "vNIC"; -	else if (adapter->function_mode & UMC_ENABLED) -		return "UMC"; -	else -		return ""; +	char *str = "";	/* default */ + +	switch (adapter->mc_type) { +	case UMC: +		str = "UMC"; +		break; +	case FLEX10: +		str = "FLEX10"; +		break; +	case vNIC1: +		str = "vNIC-1"; +		break; +	case nPAR: +		str = "nPAR"; +		break; +	case UFP: +		str = "UFP"; +		break; +	case vNIC2: +		str = "vNIC-2"; +		break; +	default: +		str = ""; +	} + +	return str;  }  static inline char *func_name(struct be_adapter *adapter) @@ -4335,28 +4786,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)  	adapter->netdev = netdev;  	SET_NETDEV_DEV(netdev, &pdev->dev); -	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); +	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));  	if (!status) { -		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); -		if (status < 0) { -			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n"); -			goto free_netdev; -		}  		netdev->features |= NETIF_F_HIGHDMA;  	} else { -		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); -		if (!status) -			status = dma_set_coherent_mask(&pdev->dev, -						       DMA_BIT_MASK(32)); +		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));  		if (status) {  			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");  			goto free_netdev;  		}  	} -	status = pci_enable_pcie_error_reporting(pdev); -	if (status) -		dev_info(&pdev->dev, "Could not use PCIe error reporting\n"); +	if (be_physfn(adapter)) { +		status = pci_enable_pcie_error_reporting(pdev); +		if (!status) +			dev_info(&pdev->dev, "PCIe error reporting enabled\n"); +	}  	status = be_ctrl_init(adapter);  	if (status) @@ -4427,7 +4872,6 @@ ctrl_clean:  	be_ctrl_cleanup(adapter);  free_netdev:  	free_netdev(netdev); -	pci_set_drvdata(pdev, NULL);  rel_reg:  	pci_release_regions(pdev);  disable_dev: @@ -4442,9 +4886,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)  	struct be_adapter *adapter 
= pci_get_drvdata(pdev);  	struct net_device *netdev =  adapter->netdev; -	if (adapter->wol) +	if (adapter->wol_en)  		be_setup_wol(adapter, true); +	be_intr_set(adapter, false);  	cancel_delayed_work_sync(&adapter->func_recovery_work);  	netif_device_detach(netdev); @@ -4480,6 +4925,7 @@ static int be_resume(struct pci_dev *pdev)  	if (status)  		return status; +	be_intr_set(adapter, true);  	/* tell fw we're ready to fire cmds */  	status = be_cmd_fw_init(adapter);  	if (status) @@ -4496,7 +4942,7 @@ static int be_resume(struct pci_dev *pdev)  			      msecs_to_jiffies(1000));  	netif_device_attach(netdev); -	if (adapter->wol) +	if (adapter->wol_en)  		be_setup_wol(adapter, false);  	return 0; @@ -4523,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)  }  static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, -				pci_channel_state_t state) +					    pci_channel_state_t state)  {  	struct be_adapter *adapter = pci_get_drvdata(pdev);  	struct net_device *netdev =  adapter->netdev; @@ -4602,6 +5048,12 @@ static void be_eeh_resume(struct pci_dev *pdev)  	if (status)  		goto err; +	/* On some BE3 FW versions, after a HW reset, +	 * interrupts will remain disabled for each function. +	 * So, explicitly enable interrupts +	 */ +	be_intr_set(adapter, true); +  	/* tell fw we're ready to fire cmds */  	status = be_cmd_fw_init(adapter);  	if (status) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 9cd5415fe01..5bf16603a3e 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   *   * This program is free software; you can redistribute it and/or @@ -35,6 +35,12 @@ static void _be_roce_dev_add(struct be_adapter *adapter)  	if (!ocrdma_drv)  		return; + +	if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) { +		dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n"); +		return; +	} +  	if (pdev->device == OC_DEVICE_ID5) {  		/* only msix is supported on these devices */  		if (!msix_enabled(adapter)) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index 2cd1129e19a..a3d9e96c18e 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@  /* - * Copyright (C) 2005 - 2013 Emulex + * Copyright (C) 2005 - 2014 Emulex   * All rights reserved.   *   * This program is free software; you can redistribute it and/or @@ -21,6 +21,8 @@  #include <linux/pci.h>  #include <linux/netdevice.h> +#define BE_ROCE_ABI_VERSION	1 +  struct ocrdma_dev;  enum be_interrupt_mode { @@ -52,6 +54,7 @@ struct be_dev_info {  /* ocrdma driver register's the callback functions with nic driver. */  struct ocrdma_driver {  	unsigned char name[32]; +	u32 be_abi_version;  	struct ocrdma_dev *(*add) (struct be_dev_info *dev_info);  	void (*remove) (struct ocrdma_dev *);  	void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);  | 
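
be_tx_compl_clean() above replaces the fixed 200 ms drain budget with a quiet-time rule: the timeout counter is reset whenever the hardware returns TX completions, and polling stops only after roughly 10 ms of silence or a detected HW error. A minimal standalone sketch of that idiom follows; reap_completions() is an invented stand-in for walking the real TX completion queue and is not driver code.

/* Standalone sketch (not driver code): the reset-on-progress timeout used
 * when draining TX completions. reap_completions() fakes the hardware; the
 * real code walks txo->cq and calls mdelay(1) between polls.
 */
#include <stdio.h>

static int reap_completions(int poll)
{
	return poll < 7 ? 4 : 0;	/* HW delivers work for 7 polls, then goes quiet */
}

int main(void)
{
	int timeo = 0, poll = 0;

	for (;;) {
		int cmpl = reap_completions(poll++);

		if (cmpl) {
			printf("poll %2d: reaped %d completions\n", poll, cmpl);
			timeo = 0;		/* progress: restart the silence clock */
		}
		if (++timeo > 10) {		/* ~10 quiet 1 ms polls: give up */
			printf("HW silent for 10 polls, stop draining\n");
			break;
		}
		/* mdelay(1) in the driver; omitted here */
	}
	return 0;
}

Resetting the counter on progress rather than counting total polls ties the 10 ms bound to hardware silence, not to the overall drain time.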
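be_rx_qs_create() now sizes the indirection table with RSS_INDIR_TABLE_LEN, records the queue index alongside each rss_id, and passes a freshly generated RSS_HASH_KEY_LEN-byte key to be_cmd_rss_config(). The round-robin fill can be reproduced in a few lines of plain C; the sketch below uses bare arrays and rand() as stand-ins for struct rss_info and get_random_bytes(), and assumes 5 RSS-capable RX queues purely as an example.

/* Standalone sketch (not driver code) of the round-robin indirection-table
 * fill in be_rx_qs_create(). Plain arrays and rand() stand in for
 * struct rss_info and get_random_bytes(); queue ids are just 0..N-1 here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define RSS_INDIR_TABLE_LEN	128
#define RSS_HASH_KEY_LEN	40

int main(void)
{
	unsigned int num_rss_qs = 5;	/* example value: RSS-capable RX queues */
	unsigned char rsstable[RSS_INDIR_TABLE_LEN];
	unsigned char rss_hkey[RSS_HASH_KEY_LEN];
	unsigned int i, j;

	/* Spread the RSS queues evenly across all 128 hash buckets,
	 * mirroring the j += (num_rx_qs - 1) loop in the patch.
	 */
	for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && j + i < RSS_INDIR_TABLE_LEN; i++)
			rsstable[j + i] = (unsigned char)i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < RSS_HASH_KEY_LEN; i++)	/* random key, as get_random_bytes() would give */
		rss_hkey[i] = (unsigned char)(rand() & 0xff);

	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
		printf("%u%c", rsstable[i], (i % 16 == 15) ? '\n' : ' ');
	printf("key[0]=0x%02x ... key[%d]=0x%02x\n",
	       rss_hkey[0], RSS_HASH_KEY_LEN - 1, rss_hkey[RSS_HASH_KEY_LEN - 1]);
	return 0;
}

The sketch does not model the failure path; in the patch a failed be_cmd_rss_config() sets rss_flags back to RSS_ENABLE_NONE instead of leaving a half-programmed table.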
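The new be_check_flash_crc() helper lets both be_flash_BEx() and be_flash_skyhawk() skip sections whose on-flash CRC already matches the image being downloaded: the 4 bytes read back through be_cmd_get_flash_crc() are compared against the last 4 bytes of that section inside the UFI file. Below is a toy sketch of just the offset arithmetic and the comparison; the small fixed buffers and the crc_matches() helper are invented stand-ins for the firmware file and the MCC response.

/* Toy sketch (not driver code) of the skip-if-CRC-matches test in
 * be_check_flash_crc(). Fixed buffers replace the UFI file and the CRC
 * returned by be_cmd_get_flash_crc().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Every flashable section ends in a 4-byte CRC inside the UFI image. */
static bool crc_matches(const uint8_t *fw, uint32_t hdr_size,
			uint32_t img_offset, uint32_t img_size,
			const uint8_t flashed_crc[4])
{
	uint32_t crc_offset = hdr_size + img_offset + img_size - 4;

	return memcmp(flashed_crc, fw + crc_offset, 4) == 0;
}

int main(void)
{
	/* 8-byte header + one 12-byte section whose last 4 bytes are its CRC */
	uint8_t fw[20] = { [16] = 0xde, [17] = 0xad, [18] = 0xbe, [19] = 0xef };
	uint8_t flashed_crc[4] = { 0xde, 0xad, 0xbe, 0xef };

	if (crc_matches(fw, 8, 0, 12, flashed_crc))
		printf("section unchanged - skip flashing\n");
	else
		printf("CRC differs - flash this section\n");
	return 0;
}

On Skyhawk the patch additionally treats an ILLEGAL_REQUEST or ILLEGAL_FIELD status from the CRC query as a partially completed download that requires a server reboot before the same image is downloaded again.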
