Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x.h')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 366
1 file changed, 291 insertions, 75 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 3dba2a70a00..8206a293e6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
*/
@@ -26,20 +26,18 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.17-0"
-#define DRV_MODULE_RELDATE "2013/04/11"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
#define BCM_DCBNL
#endif
-
#include "bnx2x_hsi.h"
#include "../cnic_if.h"
-
#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
#include <linux/mdio.h>
@@ -77,13 +75,22 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
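
A small standalone sketch (plain userspace C with made-up mask values, not part of this patch) of how the two gates above differ: DP fires when any bit of __mask is enabled in msg_enable, while the new DP_AND only fires when every bit is enabled.

#include <stdio.h>

#define MSG_IFUP  0x0002	/* hypothetical mask bits for the demo */
#define MSG_PROBE 0x0004

static unsigned int msg_enable = MSG_IFUP;	/* only IFUP enabled */

#define MODEL_DP(mask, fmt, ...)				\
do {								\
	if (msg_enable & (mask))				\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

#define MODEL_DP_AND(mask, fmt, ...)				\
do {								\
	if ((msg_enable & (mask)) == (mask))			\
		printf(fmt, ##__VA_ARGS__);			\
} while (0)

int main(void)
{
	MODEL_DP(MSG_IFUP | MSG_PROBE, "DP: prints, IFUP bit is on\n");
	MODEL_DP_AND(MSG_IFUP | MSG_PROBE, "DP_AND: silent, PROBE bit is off\n");
	return 0;
}
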
#define DP_CONT(__mask, fmt, ...) \
@@ -114,7 +121,6 @@ do { \
#define BNX2X_ERROR(fmt, ...) \
pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
-
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(fmt, ...) \
do { \
@@ -147,7 +153,6 @@ do { \
#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
-
#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
@@ -250,8 +255,37 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+/* use a value high enough to be above all the PFs and whose least significant
+ * nibble is 8, so that when cnic needs to come up with a CID for UIO to use to
+ * calculate the doorbell address according to the old doorbell configuration
+ * scheme (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a
+ * valid number. We must avoid coming up with cid 8 for iscsi, since with this
+ * method the designated UIO cid would come out as 0, which has special
+ * handling that doesn't suit us. Therefore we round up to the closest cid
+ * whose least significant nibble is 8, and if that is 8 we move forward to
+ * 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
(bp)->max_cos)
+/* amount of cids traversed by UIO's DPM addition to doorbell */
+#define UIO_DPM 8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID, adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
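
As a sanity check of the macros above, a standalone sketch (illustrative queue counts only, not driver code) that mirrors the roundup/offset arithmetic and reproduces the 0x18 case mentioned in the comment:

#include <stdio.h>

#define DPM 8	/* mirrors UIO_DPM */

static unsigned int align_cid(unsigned int first_cid)
{
	unsigned int r = ((first_cid + DPM - 1) / DPM) * DPM;	/* roundup() */
	unsigned int off = (r + DPM) % (DPM * 2);	/* force low nibble 8 */
	unsigned int aligned = r + off;

	if (aligned == DPM)		/* avoid cid 8: UIO would map it to 0 */
		aligned += DPM * 2;	/* move forward to 0x18 */
	return aligned;
}

int main(void)
{
	/* e.g. 6 non-CNIC queues * max_cos 3 -> first non-L2-ETH cid 18 */
	printf("first=18 -> start=0x%x\n", align_cid(18));	/* 0x18 */
	printf("first=8  -> start=0x%x\n", align_cid(8));	/* 0x18 */
	printf("first=33 -> start=0x%x\n", align_cid(33));	/* 0x28 */
	return 0;
}
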
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
@@ -312,6 +346,7 @@ struct sw_tx_bd {
u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD (1<<0)
+#define BNX2X_HAS_SECOND_PBD (1<<1)
};
struct sw_rx_page {
@@ -366,7 +401,7 @@ union db_prod {
/*
* Number of required SGEs is the sum of two:
* 1. Number of possible opened aggregations (next packet for
- * these aggregations will probably consume SGE immidiatelly)
+ * these aggregations will probably consume SGE immediately)
* 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
* after placement on BD for new TPA aggregation)
*
@@ -387,7 +422,6 @@ union db_prod {
#define BIT_VEC64_ELEM_SHIFT 6
#define BIT_VEC64_ELEM_MASK ((u64)BIT_VEC64_ELEM_SZ - 1)
-
#define __BIT_VEC64_SET_BIT(el, bit) \
do { \
el = ((el) | ((u64)0x1 << (bit))); \
@@ -398,7 +432,6 @@ union db_prod {
el = ((el) & (~((u64)0x1 << (bit)))); \
} while (0)
-
#define BIT_VEC64_SET_BIT(vec64, idx) \
__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
(idx) & BIT_VEC64_ELEM_MASK)
@@ -419,8 +452,6 @@ union db_prod {
/*******************************************************/
-
-
/* Number of u64 elements in SGE mask array */
#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
@@ -451,7 +482,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
- bool l4_rxhash;
+ enum pkt_hash_types rxhash_type;
u16 gro_size;
u16 full_page;
};
@@ -493,11 +524,28 @@ struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
struct napi_struct napi;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define BNX2X_FP_STATE_IDLE 0
+#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
+#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
+#define BNX2X_FP_STATE_DISABLED (1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
+#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
+#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
+#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
+ /* protect state */
+ spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
union host_hc_status_block status_blk;
- /* chip independed shortcuts into sb structure */
+ /* chip independent shortcuts into sb structure */
__le16 *sb_index_values;
__le16 *sb_running_index;
- /* chip independed shortcut into rx_prods_offset memory */
+ /* chip independent shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
u32 rx_buf_size;
@@ -565,6 +613,138 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+{
+ spin_lock_init(&fp->lock);
+ fp->state = BNX2X_FP_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a FP */
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+ bool rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_LOCKED) {
+ WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+ fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
+ rc = false;
+ } else {
+ /* we don't care if someone yielded */
+ fp->state = BNX2X_FP_STATE_NAPI;
+ }
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the FP while napi had it */
+static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+ bool rc = false;
+
+ spin_lock_bh(&fp->lock);
+ WARN_ON(fp->state &
+ (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
+
+ if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+ rc = true;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* called from bnx2x_low_latency_poll() */
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+ bool rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if ((fp->state & BNX2X_FP_LOCKED)) {
+ fp->state |= BNX2X_FP_STATE_POLL_YIELD;
+ rc = false;
+ } else {
+ /* preserve yield marks */
+ fp->state |= BNX2X_FP_STATE_POLL;
+ }
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* returns true if someone tried to get the FP while it was locked */
+static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+ bool rc = false;
+
+ spin_lock_bh(&fp->lock);
+ WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
+
+ if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
+ rc = true;
+
+ /* state ==> idle, unless currently disabled */
+ fp->state &= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+ return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+ WARN_ON(!(fp->state & BNX2X_FP_OWNED));
+ return fp->state & BNX2X_FP_USER_PEND;
+}
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ bool rc = true;
+
+ spin_lock_bh(&fp->lock);
+ if (fp->state & BNX2X_FP_OWNED)
+ rc = false;
+ fp->state |= BNX2X_FP_STATE_DISABLED;
+ spin_unlock_bh(&fp->lock);
+
+ return rc;
+}
+#else
+static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+{
+}
+
+static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
+
+static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+
+static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+
+static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+
+static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
+{
+ return false;
+}
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+ return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
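
The helpers above implement a small ownership/yield state machine between the NAPI handler and busy-polling sockets. A single-threaded standalone model (spinlock omitted, names shortened; not the driver's actual bnx2x_poll/bnx2x_low_latency_poll paths) of the intended sequence:

#include <stdbool.h>
#include <stdio.h>

#define FP_IDLE        0
#define FP_NAPI        (1 << 0)
#define FP_POLL        (1 << 1)
#define FP_DISABLED    (1 << 2)
#define FP_NAPI_YIELD  (1 << 3)
#define FP_POLL_YIELD  (1 << 4)
#define FP_OWNED       (FP_NAPI | FP_POLL)
#define FP_LOCKED      (FP_OWNED | FP_DISABLED)

static unsigned int state = FP_IDLE;	/* stands in for fp->state */

static bool lock_napi(void)
{
	if (state & FP_LOCKED) {	/* someone else owns the ring */
		state |= FP_NAPI_YIELD;
		return false;
	}
	state = FP_NAPI;
	return true;
}

static bool lock_poll(void)
{
	if (state & FP_LOCKED) {	/* NAPI (or disable) got here first */
		state |= FP_POLL_YIELD;
		return false;
	}
	state |= FP_POLL;
	return true;
}

static bool unlock_napi(void)
{
	bool contended = state & FP_POLL_YIELD;	/* a socket poll tried meanwhile */

	state &= FP_DISABLED;	/* back to idle unless being disabled */
	return contended;
}

int main(void)
{
	printf("napi gets the fp:     %d\n", lock_napi());	/* 1 */
	printf("busy poll must yield: %d\n", lock_poll());	/* 0 */
	printf("napi saw contention:  %d\n", unlock_napi());	/* 1 */
	return 0;
}
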
+
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
@@ -580,12 +760,10 @@ struct bnx2x_fastpath {
txdata_ptr[FIRST_TX_COS_INDEX] \
->var)
-
#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
-
/* MC hsi */
#define MAX_FETCH_BD 13 /* HW max BDs per packet */
#define RX_COPY_THRESH 92
@@ -613,7 +791,7 @@ struct bnx2x_fastpath {
* START_BD(splitted) - includes unpaged data segment for GSO
* PARSING_BD - for TSO and CSUM data
* PARSING_BD2 - for encapsulation data
- * Frag BDs - decribes pages for frags
+ * Frag BDs - describes pages for frags
*/
#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
@@ -693,12 +871,10 @@ struct bnx2x_fastpath {
FW_DROP_LEVEL(bp))
#define RCQ_TH_HI(bp) (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
-
/* This is needed for determining of last_max */
#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
#define SUB_S32(a, b) (s32)((s32)(a) - (s32)(b))
-
#define BNX2X_SWCID_SHIFT 17
#define BNX2X_SWCID_MASK ((0x1 << BNX2X_SWCID_SHIFT) - 1)
@@ -712,18 +888,15 @@ struct bnx2x_fastpath {
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
- DPM_TRIGER_TYPE); \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
} while (0)
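
The hunk above drops the legacy 0x40 DPM trigger offset and shrinks the doorbell stride to 8 bytes, so a doorbell address is now just base + stride * cid. A quick arithmetic sketch, assuming bp->db_size equals 1 << BNX2X_DB_SHIFT (an assumption; that assignment is made outside this header):

#include <stdio.h>

#define DB_SHIFT 3			/* mirrors the new BNX2X_DB_SHIFT */

int main(void)
{
	unsigned int db_size = 1 << DB_SHIFT;	/* 8-byte stride */
	unsigned int cid = 0x18;	/* e.g. the UIO cid from the comment above */

	/* old scheme: base + (1 << 7) * cid + 0x40; new scheme: */
	printf("doorbell for cid 0x%x at base + 0x%x\n", cid, db_size * cid);
	return 0;
}
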
-
/* TX CSUM helpers */
#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
skb->csum_offset)
@@ -766,7 +939,6 @@ struct bnx2x_fastpath {
#define BNX2X_RX_SUM_FIX(cqe) \
BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
-
#define FP_USB_FUNC_OFF \
offsetof(struct cstorm_status_block_u, func)
#define FP_CSB_FUNC_OFF \
@@ -900,14 +1072,14 @@ struct bnx2x_common {
#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
(CHIP_REV(bp) == CHIP_REV_Ax))
/* This define is used in two main places:
- * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
+ * 1. In the early stages of nic_load, to know if to configure Parser / Searcher
* to nic-only mode or to offload mode. Offload mode is configured if either the
* chip is E1x (where MIC_MODE register is not applicable), or if cnic already
* registered for this port (which means that the user wants storage services).
* 2. During cnic-related load, to know if offload mode is already configured in
- * the HW or needs to be configrued.
+ * the HW or needs to be configured.
* Since the transition from nic-mode to offload-mode in HW causes traffic
- * coruption, nic-mode is configured only in ports on which storage services
+ * corruption, nic-mode is configured only in ports on which storage services
* where never requested.
*/
#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
@@ -984,18 +1156,28 @@ struct bnx2x_port {
(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
/* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
#define BNX2X_MAX_NUM_OF_VFS 64
-#define BNX2X_VF_CID_WND 0
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF 1
-#define BNX2X_FIRST_VF_CID 256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
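
The #if above is the build-time form of a simple size budget: each VF gets BNX2X_CIDS_PER_VF doorbells of (1 << BNX2X_DB_SHIFT) bytes each, and the product must fit in the 512-byte per-VF doorbell BAR. Spelled out with the values in this header (a sketch, not driver code):

#include <assert.h>
#include <stdio.h>

#define VF_CID_WND	4			/* log2 of queues per VF */
#define CIDS_PER_VF	(1 << VF_CID_WND)	/* 16 */
#define DB_SHIFT	3			/* 8 bytes per doorbell */
#define VF_BAR_SIZE	512

int main(void)
{
	unsigned int used = CIDS_PER_VF * (1 << DB_SHIFT);	/* 16 * 8 = 128 */

	assert(used <= VF_BAR_SIZE);	/* same condition the #error enforces */
	printf("%u of %u doorbell bytes used per VF\n", used, VF_BAR_SIZE);
	return 0;
}
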
+
/*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* control by the number of fast-path status blocks supported by the
@@ -1008,14 +1190,14 @@ extern struct workqueue_struct *bnx2x_wq;
* If the maximum number of FP-SB available is X then:
* a. If CNIC is supported it consumes 1 FP-SB thus the max number of
* regular L2 queues is Y=X-1
- * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
+ * b. In MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
* c. If the FCoE L2 queue is supported the actual number of L2 queues
* is Y+1
* d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
* slow-path interrupts) or Y+2 if CNIC is supported (one additional
* FP interrupt context for the CNIC).
* e. The number of HW context (CID count) is always X or X+1 if FCoE
- * L2 queue is supported. the cid for the FCoE L2 queue is always X.
+ * L2 queue is supported. The cid for the FCoE L2 queue is always X.
*/
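
To make counting rules a-e above concrete, a tiny sketch plugging in one hypothetical configuration (X = 16 status blocks, single-function mode, CNIC and FCoE enabled; numbers are illustrative only):

#include <stdio.h>

int main(void)
{
	int x = 16;		/* available fast-path status blocks (example) */
	int cnic = 1, fcoe = 1;	/* storage offload enabled in this example */

	int y = x - cnic;		/* rule a: regular L2 queues */
	int l2_queues = y + fcoe;	/* rule c: FCoE adds one queue */
	int irqs = y + 1 + cnic;	/* rule d: +1 slow path, +1 for CNIC */
	int cids = x + fcoe;		/* rule e: HW contexts */

	printf("L2 queues %d, MSIX vectors %d, CIDs %d\n",
	       l2_queues, irqs, cids);
	return 0;
}
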
/* fast-path interrupt contexts E1x */
@@ -1045,8 +1227,9 @@ union cdu_context {
/* TM (timers) host DB constants */
#define TM_ILT_PAGE_SZ_HW 0
#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM 1024
+#define TM_CONN_NUM (BNX2X_FIRST_VF_CID + \
+ BNX2X_VF_CIDS + \
+ CNIC_ISCSI_CID_MAX)
#define TM_ILT_SZ (8 * TM_CONN_NUM)
#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
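
The new TM_CONN_NUM sizes the timers block for every CID that can own a timer: all PF CIDs below the VF range, all VF CIDs, and the iSCSI CIDs. A worked sizing sketch (CNIC_ISCSI_CID_MAX comes from cnic_if.h; 256 is assumed here purely for illustration):

#include <stdio.h>

#define MAX_VFS		64
#define CIDS_PER_VF	16
#define VF_CIDS		(MAX_VFS * CIDS_PER_VF)		/* 1024 */
#define FIRST_VF_CID	VF_CIDS				/* PF zone mirrors VF zone */
#define ISCSI_CID_MAX	256	/* assumed value of CNIC_ISCSI_CID_MAX */
#define TM_ILT_PAGE_SZ	4096

int main(void)
{
	int conns = FIRST_VF_CID + VF_CIDS + ISCSI_CID_MAX;	/* 2304 */
	int ilt_sz = 8 * conns;					/* 8 bytes per timer */
	int lines = (ilt_sz + TM_ILT_PAGE_SZ - 1) / TM_ILT_PAGE_SZ;

	printf("%d timer connections -> %d bytes -> %d ILT lines\n",
	       conns, ilt_sz, lines);
	return 0;
}
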
@@ -1068,7 +1251,6 @@ struct bnx2x_slowpath {
struct eth_classify_rules_ramrod_data e2;
} mac_rdata;
-
union {
struct tstorm_eth_mac_filter_config e1x;
struct eth_filter_rules_ramrod_data e2;
@@ -1085,6 +1267,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1098,7 +1281,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -1119,7 +1305,6 @@ struct bnx2x_slowpath {
#define bnx2x_sp_mapping(bp, var) \
(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
-
/* attn group wiring */
#define MAX_DYNAMIC_ATTN_GRPS 8
@@ -1214,17 +1399,24 @@ struct bnx2x_fw_stats_data {
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_FAN_FAILURE,
BNX2X_SP_RTNL_AFEX_F_UPDATE,
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
- BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_GET_DRV_VERSION,
};
+enum bnx2x_iov_flag {
+ BNX2X_IOV_HANDLE_VF_MSG,
+ BNX2X_IOV_HANDLE_FLR,
+};
struct bnx2x_prev_path_list {
struct list_head list;
@@ -1375,7 +1567,6 @@ struct bnx2x {
#define PCI_32BIT_FLAG (1 << 1)
#define ONE_PORT_FLAG (1 << 2)
#define NO_WOL_FLAG (1 << 3)
-#define USING_DAC_FLAG (1 << 4)
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
@@ -1388,10 +1579,15 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define TX_SWITCHING (1 << 18)
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
+#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
+#define HAS_PHYS_PORT_ID (1 << 25)
+#define AER_ENABLED (1 << 26)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1417,10 +1613,11 @@ struct bnx2x {
*/
bool fcoe_init;
- int pm_cap;
int mrrs;
struct delayed_work sp_task;
+ struct delayed_work iov_task;
+
atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
@@ -1468,7 +1665,7 @@ struct bnx2x {
u16 rx_ticks_int;
u16 rx_ticks;
/* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT (0xff*BNX2X_BTR)
u32 lin_cnt;
@@ -1511,6 +1708,10 @@ struct bnx2x {
struct bnx2x_slowpath *slowpath;
dma_addr_t slowpath_mapping;
+ /* Mechanism protecting the drv_info_to_mcp */
+ struct mutex drv_info_mutex;
+ bool drv_info_mng_owner;
+
/* Total number of FW statistics requests */
u8 fw_stats_num;
@@ -1537,10 +1738,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
* context size we need 8 ILT entries.
*/
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
@@ -1556,10 +1757,11 @@ struct bnx2x {
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
+
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
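
Continuing the earlier alignment example (6 ETH queues, max_cos taken as 3, hence a UIO_CID_PAD of 6), the reworked BNX2X_L2_CID_COUNT now budgets the padded CIDs as well. A quick sketch of the count only; ILT_PAGE_CIDS is defined elsewhere, so the ILT-line division is left out:

#include <stdio.h>

int main(void)
{
	int eth_queues = 6;	/* hypothetical BNX2X_NUM_ETH_QUEUES(bp) */
	int multi_tx_cos = 3;	/* BNX2X_MULTI_TX_COS assumed 3 for illustration */
	int cnic = 1;
	int uio_cid_pad = 6;	/* from the alignment example: 24 - 18 */

	/* old budget: queues * cos + 2 * cnic; new budget adds the padding */
	int old_count = eth_queues * multi_tx_cos + 2 * cnic;
	int new_count = eth_queues * multi_tx_cos + cnic * (2 + uio_cid_pad);

	printf("L2 CID count: %d before, %d after the UIO padding\n",
	       old_count, new_count);
	return 0;
}
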
@@ -1585,7 +1787,7 @@ struct bnx2x {
struct mutex cnic_mutex;
struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
- /* Start index of the "special" (CNIC related) L2 cleints */
+ /* Start index of the "special" (CNIC related) L2 clients */
u8 cnic_base_cl_id;
int dmae_ready;
@@ -1699,7 +1901,10 @@ struct bnx2x {
/* operation indication for the sp_rtnl task */
unsigned long sp_rtnl_state;
- /* DCBX Negotation results */
+ /* Indication of the IOV tasks */
+ unsigned long iov_task_state;
+
+ /* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
@@ -1720,6 +1925,10 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
+
+ u8 phys_port_id[ETH_ALEN];
};
/* Tx queues may be less or equal to Rx queues */
@@ -1754,8 +1963,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
-
-
+#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -1853,9 +2061,6 @@ struct bnx2x_func_init_params {
#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
-
-
-
/**
* bnx2x_set_mac_one - configure a single MAC address
*
@@ -1899,7 +2104,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
@@ -1920,8 +2124,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u32 *comp);
/* FLR related routines */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -1937,6 +2141,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+bool bnx2x_port_after_undi(struct bnx2x *bp);
+
static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
int wait)
{
@@ -1957,9 +2163,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -1998,7 +2203,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
-
/* DMAE command defines */
#define DMAE_TIMEOUT -1
#define DMAE_PCI_ERROR -2 /* E2 and onward */
@@ -2062,7 +2266,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
- indicates eror */
+ * indicates error
+ */
#define MAX_DMAE_C_PER_PORT 8
#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -2079,7 +2284,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define BNX2X_NUM_TESTS_SF 7
#define BNX2X_NUM_TESTS_MF 3
#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
- BNX2X_NUM_TESTS_SF)
+ IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
@@ -2100,7 +2305,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define SP_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_spe))
#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
-
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
@@ -2137,6 +2341,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ATTN_HARD_WIRED_MASK 0xff00
#define ATTENTION_ID 4
+#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \
+ IS_MF_FCOE_AFEX(bp))
/* stuff added to make the code fit 80Col */
@@ -2280,7 +2486,8 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
+int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2338,4 +2545,13 @@ enum {
#define NUM_MACS 8
+void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+ (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */